From e729e648e4259940473e256dd4f9c8df99e774b0 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 17 Jan 2018 15:12:58 +0800 Subject: [PATCH] dpaa2: support layerscape This is an integrated patch for layerscape dpaa2 support. Signed-off-by: Bogdan Purcareata Signed-off-by: Ioana Radulescu Signed-off-by: Razvan Stefanescu Signed-off-by: costi Signed-off-by: Catalin Horghidan Signed-off-by: Mathew McBride Signed-off-by: Yangbo Lu --- drivers/soc/fsl/ls2-console/Kconfig | 4 + drivers/soc/fsl/ls2-console/Makefile | 1 + drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++ drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 + drivers/staging/fsl-dpaa2/ethernet/README | 186 ++ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 352 ++ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3516 ++++++++++++++++++++ drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 499 +++ drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 864 +++++ drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 + drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 658 ++++ drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1903 +++++++++++ drivers/staging/fsl-dpaa2/ethernet/dpni.h | 1053 ++++++ drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++ drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 + drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 + drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 +++++ drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++ drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 +++++++ drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 +++++++++++ drivers/staging/fsl-dpaa2/evb/Kconfig | 7 + drivers/staging/fsl-dpaa2/evb/Makefile | 10 + drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++ drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++ drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++ drivers/staging/fsl-dpaa2/evb/evb.c | 1350 ++++++++ drivers/staging/fsl-dpaa2/mac/Kconfig | 23 + drivers/staging/fsl-dpaa2/mac/Makefile | 10 + drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 + drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++ drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 ++ drivers/staging/fsl-dpaa2/mac/mac.c | 670 ++++ drivers/staging/fsl-dpaa2/rtc/Makefile | 10 + drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 + drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++ drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 + drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++ 39 files changed, 23365 insertions(+) create mode 100644 drivers/soc/fsl/ls2-console/Kconfig create mode 100644 drivers/soc/fsl/ls2-console/Makefile create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h create mode 100644 
drivers/staging/fsl-dpaa2/ethsw/Kconfig create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c --- /dev/null +++ b/drivers/soc/fsl/ls2-console/Kconfig @@ -0,0 +1,4 @@ +config FSL_LS2_CONSOLE + tristate "Layerscape MC and AIOP console support" + depends on ARCH_LAYERSCAPE + default y --- /dev/null +++ b/drivers/soc/fsl/ls2-console/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o --- /dev/null +++ b/drivers/soc/fsl/ls2-console/ls2-console.c @@ -0,0 +1,284 @@ +/* Copyright 2015-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> + +/* SoC address for the MC firmware base low/high registers */ +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2 +/* MC firmware base low/high registers indexes */ +#define MCFBALR_OFFSET 0 +#define MCFBAHR_OFFSET 1 + +/* Bit mask used to obtain the most significant part of the MC base address */ +#define MC_FW_HIGH_ADDR_MASK 0x1FFFF +/* Bit mask used to obtain the least significant part of the MC base address */ +#define MC_FW_LOW_ADDR_MASK 0xE0000000 + +#define MC_BUFFER_OFFSET 0x01000000 +#define MC_BUFFER_SIZE (1024*1024*16) +#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET) + +#define AIOP_BUFFER_OFFSET 0x06000000 +#define AIOP_BUFFER_SIZE (1024*1024*16) +#define AIOP_OFFSET_DELTA (0) + +struct log_header { + char magic_word[8]; /* magic word */ + uint32_t buf_start; /* holds the 32-bit little-endian + * offset of the start of the buffer + */ + uint32_t buf_length; /* holds the 32-bit little-endian + * length of the buffer + */ + uint32_t last_byte; /* holds the 32-bit little-endian offset + * of the byte after the last byte that + * was written + */ + char reserved[44]; +}; + +#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000 +#define LOG_VERSION_MAJOR 1 +#define LOG_VERSION_MINOR 0 + + +#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); } + +struct console_data { + char *map_addr; + struct log_header *hdr; + char *start_addr; /* Start of buffer */ + char *end_addr; /* End of buffer */ + char *end_of_data; /* Current end of data */ + char *cur_ptr; /* Last data sent to console */ +}; + +#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND)) + +static inline void __adjust_end(struct console_data *cd) +{ + cd->end_of_data = cd->start_addr + + LAST_BYTE(le32_to_cpu(cd->hdr->last_byte)); +} + +static inline void adjust_end(struct console_data *cd) +{ + invalidate(cd->hdr); + __adjust_end(cd); +} + +static inline uint64_t get_mc_fw_base_address(void) +{ + u32 *mcfbaregs = (u32 *) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS, + SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE); + u64 mcfwbase = 0ULL; + + mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK; + mcfwbase <<= 32; + mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK; + iounmap(mcfbaregs); + pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase); + return mcfwbase; +} + +static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp, + u64 offset, u64 size, + uint8_t *emagic, uint8_t magic_len, + u32 offset_delta) +{ + struct console_data *cd; + uint8_t *magic; + uint32_t wrapped; + + cd = kmalloc(sizeof(*cd), GFP_KERNEL); + if (cd == NULL) + return -ENOMEM; + fp->private_data = cd; + cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size); + if (!cd->map_addr) { + kfree(cd); + return -ENOMEM; + } + + cd->hdr = (struct log_header *) cd->map_addr; + invalidate(cd->hdr); + + magic = cd->hdr->magic_word; + if (memcmp(magic, emagic, magic_len)) { + pr_info("magic didn't match!\n"); + pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n", + emagic[0], emagic[1], emagic[2], emagic[3], + emagic[4], emagic[5], emagic[6], emagic[7]); + pr_info(" seen: %02x %02x %02x %02x %02x %02x %02x %02x\n", + magic[0], magic[1], magic[2], magic[3], + magic[4], magic[5], magic[6], magic[7]); + iounmap(cd->map_addr); + kfree(cd); + return -EIO; + } + + cd->start_addr = cd->map_addr + + le32_to_cpu(cd->hdr->buf_start) - offset_delta; + cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length); +
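+ /* If the log has wrapped around, the oldest data begins right after + * the last byte written; otherwise it begins at buf_start. + */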
wrapped = le32_to_cpu(cd->hdr->last_byte) + & LOG_HEADER_FLAG_BUFFER_WRAPAROUND; + + __adjust_end(cd); + if (wrapped && (cd->end_of_data != cd->end_addr)) + cd->cur_ptr = cd->end_of_data+1; + else + cd->cur_ptr = cd->start_addr; + + return 0; +} + +static int fsl_ls2_mc_console_open(struct inode *node, struct file *fp) +{ + uint8_t magic_word[] = { 0, 1, 'C', 'M' }; + + return fsl_ls2_generic_console_open(node, fp, + MC_BUFFER_OFFSET, MC_BUFFER_SIZE, + magic_word, sizeof(magic_word), + MC_OFFSET_DELTA); +} + +static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp) +{ + uint8_t magic_word[] = { 'P', 'O', 'I', 'A' }; + + return fsl_ls2_generic_console_open(node, fp, + AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE, + magic_word, sizeof(magic_word), + AIOP_OFFSET_DELTA); +} + +static int fsl_ls2_console_close(struct inode *node, struct file *fp) +{ + struct console_data *cd = fp->private_data; + + iounmap(cd->map_addr); + kfree(cd); + return 0; +} + +static ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf, size_t count, + loff_t *f_pos) +{ + struct console_data *cd = fp->private_data; + size_t bytes = 0; + char data; + + /* Check if we need to adjust the end of data addr */ + adjust_end(cd); + + while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) { + if (((u64)cd->cur_ptr) % 64 == 0) + invalidate(cd->cur_ptr); + + data = *(cd->cur_ptr); + if (copy_to_user(&buf[bytes], &data, 1)) + return -EFAULT; + cd->cur_ptr++; + if (cd->cur_ptr >= cd->end_addr) + cd->cur_ptr = cd->start_addr; + ++bytes; + } + return bytes; +} + +static const struct file_operations fsl_ls2_mc_console_fops = { + .owner = THIS_MODULE, + .open = fsl_ls2_mc_console_open, + .release = fsl_ls2_console_close, + .read = fsl_ls2_console_read, +}; + +static struct miscdevice fsl_ls2_mc_console_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "fsl_mc_console", + .fops = &fsl_ls2_mc_console_fops +}; + +static const struct file_operations fsl_ls2_aiop_console_fops = { + .owner = THIS_MODULE, + .open = fsl_ls2_aiop_console_open, + .release = fsl_ls2_console_close, + .read = fsl_ls2_console_read, +}; + +static struct miscdevice fsl_ls2_aiop_console_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "fsl_aiop_console", + .fops = &fsl_ls2_aiop_console_fops +}; + +static int __init fsl_ls2_console_init(void) +{ + int err = 0; + + pr_info("Freescale LS2 console driver\n"); + err = misc_register(&fsl_ls2_mc_console_dev); + if (err) { + pr_err("fsl_mc_console: cannot register device\n"); + return err; + } + pr_info("fsl-ls2-console: device %s registered\n", + fsl_ls2_mc_console_dev.name); + + err = misc_register(&fsl_ls2_aiop_console_dev); + if (err) { + pr_err("fsl_aiop_console: cannot register device\n"); + misc_deregister(&fsl_ls2_mc_console_dev); + return err; + } + pr_info("fsl-ls2-console: device %s registered\n", + fsl_ls2_aiop_console_dev.name); + + return 0; +} + +static void __exit fsl_ls2_console_exit(void) +{ + misc_deregister(&fsl_ls2_mc_console_dev); + + misc_deregister(&fsl_ls2_aiop_console_dev); +} + +module_init(fsl_ls2_console_init); +module_exit(fsl_ls2_console_exit); + +MODULE_AUTHOR("Roy Pledge "); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Freescale LS2 console driver"); --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Freescale DPAA2 Ethernet controller +# + +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o + +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o + +# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src) --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/README @@ -0,0 +1,186 @@ +Freescale DPAA2 Ethernet driver +=============================== + +This file provides documentation for the Freescale DPAA2 Ethernet driver. + + +Contents +======== + Supported Platforms + Architecture Overview + Creating a Network Interface + Features & Offloads + + +Supported Platforms +=================== +This driver provides networking support for Freescale DPAA2 SoCs, e.g. +LS2080A, LS2088A, LS1088A. + + +Architecture Overview +===================== +Unlike regular NICs, in the DPAA2 architecture there is no single hardware block +representing network interfaces; instead, several separate hardware resources +concur to provide the networking functionality: + - network interfaces + - queues, channels + - buffer pools + - MAC/PHY + +All hardware resources are allocated and configured through the Management +Complex (MC) portals. MC abstracts most of these resources as DPAA2 objects +and exposes ABIs through which they can be configured and controlled. A few +hardware resources, like queues, do not have a corresponding MC object and +are treated as internal resources of other objects. + +For a more detailed description of the DPAA2 architecture and its object +abstractions see: + drivers/staging/fsl-mc/README.txt + +Each Linux net device is built on top of a Datapath Network Interface (DPNI) +object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators +(DPCONs). + +Configuration interface: + + ----------------------- + | DPAA2 Ethernet Driver | + ----------------------- + . . . + . . . + . . . . . . . . . . . . + . . . + . . . + ---------- ---------- ----------- + | DPBP API | | DPNI API | | DPCON API | + ---------- ---------- ----------- + . . . software +=========== . ========== . ============ . =================== + . . . hardware + ------------------------------------------ + | MC hardware portals | + ------------------------------------------ + . . . + . . . + ------ ------ ------- + | DPBP | | DPNI | | DPCON | + ------ ------ ------- + +The DPNIs are network interfaces without a direct one-on-one mapping to PHYs. +DPBPs represent hardware buffer pools. Packet I/O is performed in the context +of DPCON objects, using DPIO portals for managing and communicating with the +hardware resources. + +Datapath (I/O) interface: + + ----------------------------------------------- + | DPAA2 Ethernet Driver | + ----------------------------------------------- + | ^ ^ | | + | | | | | + enqueue| dequeue| data | dequeue| seed | + (Tx) | (Rx, TxC)| avail.| request| buffers| + | | notify| | | + | | | | | + V | | V V + ----------------------------------------------- + | DPIO Driver | + ----------------------------------------------- + | | | | | software + | | | | | ================ + | | | | | hardware + ----------------------------------------------- + | I/O hardware portals | + ----------------------------------------------- + | ^ ^ | | + | | | | | + | | | V | + V | ================ V + ---------------------- | ------------- + queues ---------------------- | | Buffer pool | + ---------------------- | ------------- + ======================= + Channel + +Datapath I/O (DPIO) portals provide enqueue and dequeue services, data +availability notifications and buffer pool management. DPIOs are shared between +all DPAA2 objects (and implicitly all DPAA2 kernel drivers) that work with data +frames, but must be affine to the CPUs for the purpose of traffic distribution. 
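+
+As an illustration of how these services are used, frames are consumed by
+issuing a volatile dequeue on a channel and then walking the resulting
+store. A minimal sketch, based on the DPIO service API as used by this
+driver's Rx path (error handling and NAPI interaction omitted; consume_fd()
+stands in for the driver's per-frame processing):
+
+	err = dpaa2_io_service_pull_channel(NULL, ch_id, store);
+	do {
+		dq = dpaa2_io_store_next(store, &is_last);
+		if (dq)
+			consume_fd(dpaa2_dq_fd(dq));
+	} while (!is_last);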
+ +Frames are transmitted and received through hardware frame queues, which can be +grouped in channels for the purpose of hardware scheduling. The Ethernet driver +enqueues TX frames on egress queues and after transmission is complete a TX +confirmation frame is sent back to the CPU. + +When frames are available on ingress queues, a data availability notification +is sent to the CPU; notifications are raised per channel, so even if multiple +queues in the same channel have available frames, only one notification is sent. +After a channel fires a notification, it must be explicitly rearmed. + +Each network interface can have multiple Rx, Tx and confirmation queues affined +to CPUs, and one channel (DPCON) for each CPU that services at least one queue. +DPCONs are used to distribute ingress traffic to different CPUs via the cores' +affine DPIOs. + +The role of hardware buffer pools is storage of ingress frame data. Each network +interface has a privately owned buffer pool which it seeds with kernel allocated +buffers. + + +DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a DPMAC +object or to another DPNI through an internal link, but the connection is +managed by MC and completely transparent to the Ethernet driver. + + --------- --------- --------- + | eth if1 | | eth if2 | | eth ifn | + --------- --------- --------- + . . . + . . . + . . . + --------------------------- + | DPAA2 Ethernet Driver | + --------------------------- + . . . + . . . + . . . + ------ ------ ------ ------- + | DPNI | | DPNI | | DPNI | | DPMAC |----+ + ------ ------ ------ ------- | + | | | | | + | | | | ----- + =========== ================== | PHY | + ----- + +Creating a Network Interface +============================ +A net device is created for each DPNI object probed on the MC bus. Each DPNI has +a number of properties which determine the network interface configuration +options and associated hardware resources. + +DPNI objects (and the other DPAA2 objects needed for a network interface) can be +added to a container on the MC bus in one of two ways: statically, through a +Datapath Layout Binary file (DPL) that is parsed by MC at boot time; or created +dynamically at runtime, via the DPAA2 objects APIs. + + +Features & Offloads +=================== +Hardware checksum offloading is supported for TCP and UDP over IPv4/6 frames. +The checksum offloads can be independently configured on RX and TX through +ethtool. + +Hardware offload of unicast and multicast MAC filtering is supported on the +ingress path and permanently enabled. + +Scatter-gather frames are supported on both RX and TX paths. On TX, SG support +is configurable via ethtool; on RX it is always enabled. + +The DPAA2 hardware can process jumbo Ethernet frames of up to 10K bytes. + +The Ethernet driver defines a static flow hashing scheme that distributes +traffic based on a 5-tuple key: src IP, dst IP, IP proto, L4 src port, +L4 dst port. No user configuration is supported for now. + +Hardware specific statistics for the network interface as well as some +non-standard driver stats can be consulted through the ethtool -S option. --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c @@ -0,0 +1,352 @@ + +/* Copyright 2015 Freescale Semiconductor Inc.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include "dpaa2-eth.h" +#include "dpaa2-eth-debugfs.h" + +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth" + +static struct dentry *dpaa2_dbg_root; + +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + struct rtnl_link_stats64 *stats; + struct dpaa2_eth_drv_stats *extras; + int i; + + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n", + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", + "Tx SG", "Enq busy"); + + for_each_online_cpu(i) { + stats = per_cpu_ptr(priv->percpu_stats, i); + extras = per_cpu_ptr(priv->percpu_extras, i); + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", + i, + stats->rx_packets, + stats->rx_errors, + extras->rx_sg_frames, + stats->tx_packets, + stats->tx_errors, + extras->tx_conf_frames, + extras->tx_sg_frames, + extras->tx_portal_busy); + } + + return 0; +} + +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; + + err = single_open(file, dpaa2_dbg_cpu_show, priv); + if (err < 0) + netdev_err(priv->net_dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpaa2_dbg_cpu_ops = { + .open = dpaa2_dbg_cpu_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static char *fq_type_to_str(struct dpaa2_eth_fq *fq) +{ + switch (fq->type) { + case DPAA2_RX_FQ: + return "Rx"; + case DPAA2_TX_CONF_FQ: + return "Tx conf"; + case DPAA2_RX_ERR_FQ: + return "Rx err"; + default: + return "N/A"; + } +} + +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) +{ + struct
dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + struct dpaa2_eth_fq *fq; + u32 fcnt, bcnt; + int i, err; + + seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n", + "VFQID", "CPU", "Traffic Class", "Type", "Frames", + "Pending frames", "Congestion"); + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); + if (err) + fcnt = 0; + + seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n", + fq->fqid, + fq->target_cpu, + fq->tc, + fq_type_to_str(fq), + fq->stats.frames, + fcnt, + fq->stats.congestion_entry); + } + + return 0; +} + +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; + + err = single_open(file, dpaa2_dbg_fqs_show, priv); + if (err < 0) + netdev_err(priv->net_dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpaa2_dbg_fq_ops = { + .open = dpaa2_dbg_fqs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + struct dpaa2_eth_channel *ch; + int i; + + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n", + "CHID", "CPU", "Deq busy", "Frames", "CDANs", + "Avg frm/CDAN", "Buf count"); + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n", + ch->ch_id, + ch->nctx.desired_cpu, + ch->stats.dequeue_portal_busy, + ch->stats.frames, + ch->stats.cdan, + ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0, + ch->buf_count); + } + + return 0; +} + +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; + + err = single_open(file, dpaa2_dbg_ch_show, priv); + if (err < 0) + netdev_err(priv->net_dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpaa2_dbg_ch_ops = { + .open = dpaa2_dbg_ch_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf, + size_t count, loff_t *offset) +{ + struct dpaa2_eth_priv *priv = file->private_data; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + struct dpaa2_eth_fq *fq; + struct dpaa2_eth_channel *ch; + int i; + + for_each_online_cpu(i) { + percpu_stats = per_cpu_ptr(priv->percpu_stats, i); + memset(percpu_stats, 0, sizeof(*percpu_stats)); + + percpu_extras = per_cpu_ptr(priv->percpu_extras, i); + memset(percpu_extras, 0, sizeof(*percpu_extras)); + } + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + memset(&fq->stats, 0, sizeof(fq->stats)); + } + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + memset(&ch->stats, 0, sizeof(ch->stats)); + } + + return count; +} + +static const struct file_operations dpaa2_dbg_reset_ops = { + .open = simple_open, + .write = dpaa2_dbg_reset_write, +}; + +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file, + const char __user *buf, + size_t count, loff_t *offset) +{ + struct dpaa2_eth_priv *priv = file->private_data; + int err; + + err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token); + if (err) + netdev_err(priv->net_dev, + "dpni_reset_statistics() failed %d\n", err); + + return count; +} + +static const struct file_operations dpaa2_dbg_reset_mc_ops = { + .open = simple_open, + .write = dpaa2_dbg_reset_mc_write, +}; + +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) +{ + if (!dpaa2_dbg_root) + return; + + /* Create a directory for the interface */ + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, + dpaa2_dbg_root); + if (!priv->dbg.dir) { + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); + return; + } + + /* per-cpu stats file */ + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444, + priv->dbg.dir, priv, + &dpaa2_dbg_cpu_ops); + if (!priv->dbg.cpu_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_cpu_stats; + } + + /* per-fq stats file */ + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444, + priv->dbg.dir, priv, + &dpaa2_dbg_fq_ops); + if (!priv->dbg.fq_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_fq_stats; + } + + /* per-channel stats file */ + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444, + priv->dbg.dir, priv, + &dpaa2_dbg_ch_ops); + if (!priv->dbg.ch_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_ch_stats; + } + + /* reset stats */ + priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200, + priv->dbg.dir, priv, + &dpaa2_dbg_reset_ops); + if (!priv->dbg.reset_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_reset_stats; + } + + /* reset MC stats */ + priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats", + 0222, priv->dbg.dir, priv, + &dpaa2_dbg_reset_mc_ops); + if (!priv->dbg.reset_mc_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_reset_mc_stats; + } + + return; + +err_reset_mc_stats: + debugfs_remove(priv->dbg.reset_stats); +err_reset_stats: + debugfs_remove(priv->dbg.ch_stats); +err_ch_stats: + debugfs_remove(priv->dbg.fq_stats); +err_fq_stats: + debugfs_remove(priv->dbg.cpu_stats); +err_cpu_stats: + debugfs_remove(priv->dbg.dir); +} + +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) +{ + debugfs_remove(priv->dbg.reset_mc_stats); + debugfs_remove(priv->dbg.reset_stats); + debugfs_remove(priv->dbg.fq_stats); + debugfs_remove(priv->dbg.ch_stats); + debugfs_remove(priv->dbg.cpu_stats); + debugfs_remove(priv->dbg.dir); +} + +void dpaa2_eth_dbg_init(void) +{ + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); + if (!dpaa2_dbg_root) { + pr_err("DPAA2-ETH: debugfs create failed\n"); + return; + } + + pr_info("DPAA2-ETH: debugfs created\n"); +} + +void __exit dpaa2_eth_dbg_exit(void) +{ + debugfs_remove(dpaa2_dbg_root); +} --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h @@ -0,0 +1,60 @@ +/* Copyright 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DPAA2_ETH_DEBUGFS_H +#define DPAA2_ETH_DEBUGFS_H + +#include <linux/dcache.h> + +struct dpaa2_eth_priv; + +struct dpaa2_debugfs { + struct dentry *dir; + struct dentry *fq_stats; + struct dentry *ch_stats; + struct dentry *cpu_stats; + struct dentry *reset_stats; + struct dentry *reset_mc_stats; +}; + +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS +void dpaa2_eth_dbg_init(void); +void dpaa2_eth_dbg_exit(void); +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); +#else +static inline void dpaa2_eth_dbg_init(void) {} +static inline void dpaa2_eth_dbg_exit(void) {} +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */ + +#endif /* DPAA2_ETH_DEBUGFS_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h @@ -0,0 +1,184 @@ +/* Copyright 2014-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED.
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM dpaa2_eth + +#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _DPAA2_ETH_TRACE_H + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/tracepoint.h> + +#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" +/* trace_printk format for raw buffer event class */ +#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d" + +/* This is used to declare a class of events. + * individual events of this type will be defined below. + */ + +/* Store details about a frame descriptor */ +DECLARE_EVENT_CLASS(dpaa2_eth_fd, + /* Trace function prototype */ + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + /* Repeat argument list here */ + TP_ARGS(netdev, fd), + + /* A structure containing the relevant information we want + * to record. Declare name and type for each normal element, + * name, type and size for arrays. Use __string for variable + * length strings. + */ + TP_STRUCT__entry( + __field(u64, fd_addr) + __field(u32, fd_len) + __field(u16, fd_offset) + __string(name, netdev->name) + ), + + /* The function that assigns values to the above declared + * fields + */ + TP_fast_assign( + __entry->fd_addr = dpaa2_fd_get_addr(fd); + __entry->fd_len = dpaa2_fd_get_len(fd); + __entry->fd_offset = dpaa2_fd_get_offset(fd); + __assign_str(name, netdev->name); + ), + + /* This is what gets printed when the trace event is + * triggered. + */ + TP_printk(TR_FMT, + __get_str(name), + __entry->fd_addr, + __entry->fd_len, + __entry->fd_offset) +); + +/* Now declare events of the above type. Format is: + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class + */ + +/* Tx (egress) fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + +/* Rx fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + +/* Tx confirmation fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + +/* Log data about raw buffers. Useful for tracing DPBP content. */ +TRACE_EVENT(dpaa2_eth_buf_seed, + /* Trace function prototype */ + TP_PROTO(struct net_device *netdev, + /* virtual address and size */ + void *vaddr, + size_t size, + /* dma map address and size */ + dma_addr_t dma_addr, + size_t map_size, + /* buffer pool id, if relevant */ + u16 bpid), + + /* Repeat argument list here */ + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), + + /* A structure containing the relevant information we want + * to record. Declare name and type for each normal element, + * name, type and size for arrays. Use __string for variable + * length strings.
+ */ + TP_STRUCT__entry( + __field(void *, vaddr) + __field(size_t, size) + __field(dma_addr_t, dma_addr) + __field(size_t, map_size) + __field(u16, bpid) + __string(name, netdev->name) + ), + + /* The function that assigns values to the above declared + * fields + */ + TP_fast_assign( + __entry->vaddr = vaddr; + __entry->size = size; + __entry->dma_addr = dma_addr; + __entry->map_size = map_size; + __entry->bpid = bpid; + __assign_str(name, netdev->name); + ), + + /* This is what gets printed when the trace event is + * triggered. + */ + TP_printk(TR_BUF_FMT, + __get_str(name), + __entry->vaddr, + __entry->size, + &__entry->dma_addr, + __entry->map_size, + __entry->bpid) +); + +/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). + * The syntax is the same as for DECLARE_EVENT_CLASS(). + */ + +#endif /* _DPAA2_ETH_TRACE_H */ + +/* This must be outside ifdef _DPAA2_ETH_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE dpaa2-eth-trace +#include <trace/define_trace.h> --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c @@ -0,0 +1,3516 @@ +/* Copyright 2014-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/etherdevice.h> +#include <linux/of_net.h> +#include <linux/interrupt.h> +#include <linux/msi.h> +#include <linux/kthread.h> +#include <linux/iommu.h> +#include <linux/net_tstamp.h> + +#include "../../fsl-mc/include/dpbp.h" +#include "../../fsl-mc/include/dpcon.h" +#include "../../fsl-mc/include/mc.h" +#include "../../fsl-mc/include/mc-sys.h" +#include "dpaa2-eth.h" +#include "dpkg.h" + +/* CREATE_TRACE_POINTS only needs to be defined once.
Other dpa files + * using trace events only need to #include + */ +#define CREATE_TRACE_POINTS +#include "dpaa2-eth-trace.h" + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); + +const char dpaa2_eth_drv_version[] = "0.1"; + +void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr) +{ + phys_addr_t phys_addr; + + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; + + return phys_to_virt(phys_addr); +} + +static void validate_rx_csum(struct dpaa2_eth_priv *priv, + u32 fd_status, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* HW checksum validation is disabled, nothing to do here */ + if (!(priv->net_dev->features & NETIF_F_RXCSUM)) + return; + + /* Read checksum validation bits */ + if (!((fd_status & DPAA2_FAS_L3CV) && + (fd_status & DPAA2_FAS_L4CV))) + return; + + /* Inform the stack there's no need to compute L3/L4 csum anymore */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +/* Free a received FD. + * Not to be used for Tx conf FDs or on any other paths. + */ +static void free_rx_fd(struct dpaa2_eth_priv *priv, + const struct dpaa2_fd *fd, + void *vaddr) +{ + struct device *dev = priv->net_dev->dev.parent; + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + struct dpaa2_sg_entry *sgt; + void *sg_vaddr; + int i; + + /* If single buffer frame, just free the data buffer */ + if (fd_format == dpaa2_fd_single) + goto free_buf; + else if (fd_format != dpaa2_fd_sg) + /* we don't support any other format */ + return; + + /* For S/G frames, we first need to free all SG entries */ + sgt = vaddr + dpaa2_fd_get_offset(fd); + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { + addr = dpaa2_sg_get_addr(&sgt[i]); + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr); + + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + + put_page(virt_to_head_page(sg_vaddr)); + + if (dpaa2_sg_is_final(&sgt[i])) + break; + } + +free_buf: + put_page(virt_to_head_page(vaddr)); +} + +/* Build a linear skb based on a single-buffer frame descriptor */ +static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) +{ + struct sk_buff *skb = NULL; + u16 fd_offset = dpaa2_fd_get_offset(fd); + u32 fd_length = dpaa2_fd_get_len(fd); + + ch->buf_count--; + + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, fd_offset); + skb_put(skb, fd_length); + + return skb; +} + +/* Build a non linear (fragmented) skb based on a S/G table */ +static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_sg_entry *sgt) +{ + struct sk_buff *skb = NULL; + struct device *dev = priv->net_dev->dev.parent; + void *sg_vaddr; + dma_addr_t sg_addr; + u16 sg_offset; + u32 sg_length; + struct page *page, *head_page; + int page_offset; + int i; + + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { + struct dpaa2_sg_entry *sge = &sgt[i]; + + /* NOTE: We only support SG entries in dpaa2_sg_single format, + * but this is the only format we may receive from HW anyway + */ + + /* Get the address and length from the S/G entry */ + sg_addr = dpaa2_sg_get_addr(sge); + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr); + dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + + sg_length = dpaa2_sg_get_len(sge); + + if (i == 0) { 
+ /* We build the skb around the first data buffer */ + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); + if (unlikely(!skb)) + goto err_build; + + sg_offset = dpaa2_sg_get_offset(sge); + skb_reserve(skb, sg_offset); + skb_put(skb, sg_length); + } else { + /* Rest of the data buffers are stored as skb frags */ + page = virt_to_page(sg_vaddr); + head_page = virt_to_head_page(sg_vaddr); + + /* Offset in page (which may be compound). + * Data in subsequent SG entries is stored from the + * beginning of the buffer, so we don't need to add the + * sg_offset. + */ + page_offset = ((unsigned long)sg_vaddr & + (PAGE_SIZE - 1)) + + (page_address(page) - page_address(head_page)); + + skb_add_rx_frag(skb, i - 1, head_page, page_offset, + sg_length, DPAA2_ETH_RX_BUF_SIZE); + } + + if (dpaa2_sg_is_final(sge)) + break; + } + + /* Count all data buffers + SG table buffer */ + ch->buf_count -= i + 2; + + return skb; + +err_build: + /* We still need to subtract the buffers used by this FD from our + * software counter + */ + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) + if (dpaa2_sg_is_final(&sgt[i])) + break; + ch->buf_count -= i + 2; + + return NULL; +} + +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) +{ + struct device *dev = priv->net_dev->dev.parent; + void *vaddr; + int i; + + for (i = 0; i < count; i++) { + /* Same logic as on regular Rx path */ + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, buf_array[i]); + dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + put_page(virt_to_head_page(vaddr)); + } +} + +/* Main Rx frame processing routine */ +static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct napi_struct *napi, + u16 queue_id) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + void *vaddr; + struct sk_buff *skb; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_fas *fas; + void *buf_data; + u32 status = 0; + + /* Tracing point */ + trace_dpaa2_rx_fd(priv->net_dev, fd); + + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr); + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); + + /* HWA - FAS, timestamp */ + fas = dpaa2_eth_get_fas(vaddr); + prefetch(fas); + /* data / SG table */ + buf_data = vaddr + dpaa2_fd_get_offset(fd); + prefetch(buf_data); + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = this_cpu_ptr(priv->percpu_extras); + + switch (fd_format) { + case dpaa2_fd_single: + skb = build_linear_skb(priv, ch, fd, vaddr); + break; + case dpaa2_fd_sg: + skb = build_frag_skb(priv, ch, buf_data); + put_page(virt_to_head_page(vaddr)); + percpu_extras->rx_sg_frames++; + percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); + break; + default: + /* We don't support any other format */ + goto err_frame_format; + } + + if (unlikely(!skb)) + goto err_build_skb; + + prefetch(skb->data); + + /* Get the timestamp value */ + if (priv->ts_rx_en) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr); + + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(*ns); + } + + /* Check if we need to validate the L4 csum */ + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { + status = le32_to_cpu(fas->status); + validate_rx_csum(priv, status, 
skb); + } + + skb->protocol = eth_type_trans(skb, priv->net_dev); + + /* Record Rx queue - this will be used when picking a Tx queue to + * forward the frames. We're keeping flow affinity through the + * network stack. + */ + skb_record_rx_queue(skb, queue_id); + + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + + napi_gro_receive(napi, skb); + + return; + +err_build_skb: + free_rx_fd(priv, fd, vaddr); +err_frame_format: + percpu_stats->rx_dropped++; +} + +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE +/* Processing of Rx frames received on the error FQ + * We check and print the error bits and then free the frame + */ +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct napi_struct *napi __always_unused, + u16 queue_id __always_unused) +{ + struct device *dev = priv->net_dev->dev.parent; + dma_addr_t addr = dpaa2_fd_get_addr(fd); + void *vaddr; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_fas *fas; + u32 status = 0; + bool check_fas_errors = false; + + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr); + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); + + /* check frame errors in the FD field */ + if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) { + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) && + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV); + if (net_ratelimit()) + netdev_dbg(priv->net_dev, "Rx frame FD err: 0x%08x\n", + fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK); + } + + /* check frame errors in the FAS field */ + if (check_fas_errors) { + fas = dpaa2_eth_get_fas(vaddr); + status = le32_to_cpu(fas->status); + if (net_ratelimit()) + netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n", + status & DPAA2_FAS_RX_ERR_MASK); + } + free_rx_fd(priv, fd, vaddr); + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_stats->rx_errors++; +} +#endif + +/* Consume all frames pull-dequeued into the store. This is the simplest way to + * make sure we don't accidentally issue another volatile dequeue which would + * overwrite (leak) frames already in the store. + * + * The number of frames is returned using the last 2 output arguments, + * separately for Rx and Tx confirmations. + * + * Observance of NAPI budget is not our concern, leaving that to the caller.
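+ * + * Note: the caller is expected to have issued a volatile dequeue + * (e.g. via dpaa2_io_service_pull_channel()) into ch->store before + * calling this function.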
+ */ +static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned, + int *tx_conf_cleaned) +{ + struct dpaa2_eth_priv *priv = ch->priv; + struct dpaa2_eth_fq *fq = NULL; + struct dpaa2_dq *dq; + const struct dpaa2_fd *fd; + int cleaned = 0; + int is_last; + + do { + dq = dpaa2_io_store_next(ch->store, &is_last); + if (unlikely(!dq)) { + /* If we're here, we *must* have placed a + * volatile dequeue command, so keep reading through + * the store until we get some sort of valid response + * token (either a valid frame or an "empty dequeue") + */ + continue; + } + + fd = dpaa2_dq_fd(dq); + + /* prefetch the frame descriptor */ + prefetch(fd); + + fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq); + fq->consume(priv, ch, fd, &ch->napi, fq->flowid); + cleaned++; + } while (!is_last); + + if (!cleaned) + return false; + + /* All frames brought in store by a volatile dequeue + * come from the same queue + */ + if (fq->type == DPAA2_TX_CONF_FQ) + *tx_conf_cleaned += cleaned; + else + *rx_cleaned += cleaned; + + fq->stats.frames += cleaned; + ch->stats.frames += cleaned; + + return true; +} + +/* Configure the egress frame annotation for timestamp update */ +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) +{ + struct dpaa2_faead *faead; + u32 ctrl; + u32 frc; + + /* Mark the egress frame annotation area as valid */ + frc = dpaa2_fd_get_frc(fd); + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); + + /* enable UPD (update prepended data) bit in FAEAD field of + * hardware frame annotation area + */ + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; + faead = dpaa2_eth_get_faead(buf_start); + faead->ctrl = cpu_to_le32(ctrl); +} + +/* Create a frame descriptor based on a fragmented skb */ +static int build_sg_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) +{ + struct device *dev = priv->net_dev->dev.parent; + void *sgt_buf = NULL; + dma_addr_t addr; + int nr_frags = skb_shinfo(skb)->nr_frags; + struct dpaa2_sg_entry *sgt; + int i, err; + int sgt_buf_size; + struct scatterlist *scl, *crt_scl; + int num_sg; + int num_dma_bufs; + struct dpaa2_fas *fas; + struct dpaa2_eth_swa *swa; + + /* Create and map scatterlist. + * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have + * to go beyond nr_frags+1. + * Note: We don't support chained scatterlists + */ + if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) + return -EINVAL; + + scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); + if (unlikely(!scl)) + return -ENOMEM; + + sg_init_table(scl, nr_frags + 1); + num_sg = skb_to_sgvec(skb, scl, 0, skb->len); + num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE); + if (unlikely(!num_dma_bufs)) { + err = -ENOMEM; + goto dma_map_sg_failed; + } + + /* Prepare the HW SGT structure */ + sgt_buf_size = priv->tx_data_offset + + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); + sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC); + if (unlikely(!sgt_buf)) { + err = -ENOMEM; + goto sgt_buf_alloc_failed; + } + sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); + + /* PTA from egress side is passed as is to the confirmation side so + * we need to clear some fields here in order to find consistent values + * on TX confirmation.
We are clearing FAS (Frame Annotation Status) + * field from the hardware annotation area + */ + fas = dpaa2_eth_get_fas(sgt_buf); + memset(fas, 0, DPAA2_FAS_SIZE); + + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); + + /* Fill in the HW SGT structure. + * + * sgt_buf is zeroed out, so the following fields are implicit + * in all sgt entries: + * - offset is 0 + * - format is 'dpaa2_sg_single' + */ + for_each_sg(scl, crt_scl, num_dma_bufs, i) { + dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); + dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); + } + dpaa2_sg_set_final(&sgt[i - 1], true); + + /* Store the skb backpointer in the SGT buffer. + * Fit the scatterlist and the number of buffers alongside the + * skb backpointer in the software annotation area. We'll need + * all of them on Tx Conf. + */ + swa = (struct dpaa2_eth_swa *)sgt_buf; + swa->skb = skb; + swa->scl = scl; + swa->num_sg = num_sg; + swa->num_dma_bufs = num_dma_bufs; + + /* Separately map the SGT buffer */ + addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) { + err = -ENOMEM; + goto dma_map_single_failed; + } + dpaa2_fd_set_offset(fd, priv->tx_data_offset); + dpaa2_fd_set_format(fd, dpaa2_fd_sg); + dpaa2_fd_set_addr(fd, addr); + dpaa2_fd_set_len(fd, skb->len); + + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA; + + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + enable_tx_tstamp(fd, sgt_buf); + + return 0; + +dma_map_single_failed: + kfree(sgt_buf); +sgt_buf_alloc_failed: + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); +dma_map_sg_failed: + kfree(scl); + return err; +} + +/* Create a frame descriptor based on a linear skb */ +static int build_single_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) +{ + struct device *dev = priv->net_dev->dev.parent; + u8 *buffer_start; + struct sk_buff **skbh; + dma_addr_t addr; + struct dpaa2_fas *fas; + + buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - + DPAA2_ETH_TX_BUF_ALIGN, + DPAA2_ETH_TX_BUF_ALIGN); + + /* PTA from egress side is passed as is to the confirmation side so + * we need to clear some fields here in order to find consistent values + * on TX confirmation. We are clearing FAS (Frame Annotation Status) + * field from the hardware annotation area + */ + fas = dpaa2_eth_get_fas(buffer_start); + memset(fas, 0, DPAA2_FAS_SIZE); + + /* Store a backpointer to the skb at the beginning of the buffer + * (in the private data area) such that we can release it + * on Tx confirm + */ + skbh = (struct sk_buff **)buffer_start; + *skbh = skb; + + addr = dma_map_single(dev, buffer_start, + skb_tail_pointer(skb) - buffer_start, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) + return -ENOMEM; + + dpaa2_fd_set_addr(fd, addr); + dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); + dpaa2_fd_set_len(fd, skb->len); + dpaa2_fd_set_format(fd, dpaa2_fd_single); + + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA; + + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + enable_tx_tstamp(fd, buffer_start); + + return 0; +} + +/* FD freeing routine on the Tx path + * + * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb + * back-pointed to is also freed. + * This can be called either from dpaa2_eth_tx_conf() or on the error path of + * dpaa2_eth_tx(). + * Optionally, return the frame annotation status word (FAS), which needs + * to be checked if we're on the confirmation path. 
+ */ +static void free_tx_fd(const struct dpaa2_eth_priv *priv, + const struct dpaa2_fd *fd, + u32 *status, bool in_napi) +{ + struct device *dev = priv->net_dev->dev.parent; + dma_addr_t fd_addr; + struct sk_buff **skbh, *skb; + unsigned char *buffer_start; + int unmap_size; + struct scatterlist *scl; + int num_sg, num_dma_bufs; + struct dpaa2_eth_swa *swa; + u8 fd_format = dpaa2_fd_get_format(fd); + struct dpaa2_fas *fas; + + fd_addr = dpaa2_fd_get_addr(fd); + skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr); + + /* HWA - FAS, timestamp (for Tx confirmation frames) */ + fas = dpaa2_eth_get_fas(skbh); + prefetch(fas); + + switch (fd_format) { + case dpaa2_fd_single: + skb = *skbh; + buffer_start = (unsigned char *)skbh; + /* Accessing the skb buffer is safe before dma unmap, because + * we didn't map the actual skb shell. + */ + dma_unmap_single(dev, fd_addr, + skb_tail_pointer(skb) - buffer_start, + DMA_BIDIRECTIONAL); + break; + case dpaa2_fd_sg: + swa = (struct dpaa2_eth_swa *)skbh; + skb = swa->skb; + scl = swa->scl; + num_sg = swa->num_sg; + num_dma_bufs = swa->num_dma_bufs; + + /* Unmap the scatterlist */ + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); + kfree(scl); + + /* Unmap the SGT buffer */ + unmap_size = priv->tx_data_offset + + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); + dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL); + break; + default: + /* Unsupported format, mark it as errored and give up */ + if (status) + *status = ~0; + return; + } + + /* Get the timestamp value */ + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + struct skb_shared_hwtstamps shhwtstamps; + u64 *ns; + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + + ns = (u64 *)dpaa2_eth_get_ts(skbh); + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); + shhwtstamps.hwtstamp = ns_to_ktime(*ns); + skb_tstamp_tx(skb, &shhwtstamps); + } + + /* Read the status from the Frame Annotation after we unmap the first + * buffer but before we free it. The caller function is responsible + * for checking the status value. 
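 + * Note the ordering: for SG frames the FAS lives in the SGT buffer, + * which is kfree()d a few lines below, so the status must be read first.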
+ */ + if (status) + *status = le32_to_cpu(fas->status); + + /* Free SGT buffer kmalloc'ed on tx */ + if (fd_format != dpaa2_fd_single) + kfree(skbh); + + /* Move on with skb release */ + napi_consume_skb(skb, in_napi); +} + +static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + struct dpaa2_fd fd; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + struct dpaa2_eth_fq *fq; + u16 queue_mapping = skb_get_queue_mapping(skb); + int err, i; + + /* If we're congested, stop this tx queue; transmission of the + * current skb happens regardless of congestion state + */ + fq = &priv->fq[queue_mapping]; + + dma_sync_single_for_cpu(dev, priv->cscn_dma, + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) { + netif_stop_subqueue(net_dev, queue_mapping); + fq->stats.congestion_entry++; + } + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = this_cpu_ptr(priv->percpu_extras); + + if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) { + struct sk_buff *ns; + + ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv)); + if (unlikely(!ns)) { + percpu_stats->tx_dropped++; + goto err_alloc_headroom; + } + dev_kfree_skb(skb); + skb = ns; + } + + /* We'll be holding a back-reference to the skb until Tx Confirmation; + * we don't want that overwritten by a concurrent Tx with a cloned skb. + */ + skb = skb_unshare(skb, GFP_ATOMIC); + if (unlikely(!skb)) { + /* skb_unshare() has already freed the skb */ + percpu_stats->tx_dropped++; + return NETDEV_TX_OK; + } + + /* Setup the FD fields */ + memset(&fd, 0, sizeof(fd)); + + if (skb_is_nonlinear(skb)) { + err = build_sg_fd(priv, skb, &fd); + percpu_extras->tx_sg_frames++; + percpu_extras->tx_sg_bytes += skb->len; + } else { + err = build_single_fd(priv, skb, &fd); + } + + if (unlikely(err)) { + percpu_stats->tx_dropped++; + goto err_build_fd; + } + + /* Tracing point */ + trace_dpaa2_tx_fd(net_dev, &fd); + + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { + err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, + fq->tx_qdbin, &fd); + /* TODO: This doesn't work. Check on simulator. 
+ * err = dpaa2_io_service_enqueue_fq(NULL, + * priv->fq[0].fqid_tx, &fd); + */ + if (err != -EBUSY) + break; + } + percpu_extras->tx_portal_busy += i; + if (unlikely(err < 0)) { + percpu_stats->tx_errors++; + /* Clean up everything, including freeing the skb */ + free_tx_fd(priv, &fd, NULL, false); + } else { + percpu_stats->tx_packets++; + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); + } + + return NETDEV_TX_OK; + +err_build_fd: +err_alloc_headroom: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* Tx confirmation frame processing routine */ +static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct napi_struct *napi __always_unused, + u16 queue_id) +{ + struct device *dev = priv->net_dev->dev.parent; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + u32 status = 0; + bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK); + bool check_fas_errors = false; + + /* Tracing point */ + trace_dpaa2_tx_conf_fd(priv->net_dev, fd); + + percpu_extras = this_cpu_ptr(priv->percpu_extras); + percpu_extras->tx_conf_frames++; + percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); + + /* Check congestion state and wake all queues if necessary */ + if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) { + dma_sync_single_for_cpu(dev, priv->cscn_dma, + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); + if (!dpaa2_cscn_state_congested(priv->cscn_mem)) + netif_tx_wake_all_queues(priv->net_dev); + } + + /* check frame errors in the FD field */ + if (unlikely(errors)) { + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) && + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV); + if (net_ratelimit()) + netdev_dbg(priv->net_dev, "Tx frame FD err: %08x\n", + fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK); + } + + free_tx_fd(priv, fd, check_fas_errors ? &status : NULL, true); + + /* if there are no errors, we're done */ + if (likely(!errors)) + return; + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + /* Tx-conf logically pertains to the egress path.
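 + * Frames that fail here are therefore charged to tx_errors, even + * though they are delivered to the driver on an ingress (Tx + * confirmation) queue.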
*/ + percpu_stats->tx_errors++; + + if (net_ratelimit()) + netdev_dbg(priv->net_dev, "Tx frame FAS err: %08x\n", + status & DPAA2_FAS_TX_ERR_MASK); +} + +static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) +{ + int err; + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_RX_L3_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, + "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n"); + return err; + } + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_RX_L4_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, + "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n"); + return err; + } + + return 0; +} + +static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) +{ + int err; + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_TX_L3_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, + "dpni_set_offload() DPNI_OFF_TX_L3_CSUM failed\n"); + return err; + } + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_TX_L4_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, + "dpni_set_offload() DPNI_OFF_TX_L4_CSUM failed\n"); + return err; + } + + return 0; +} + +/* Perform a single release command to add buffers + * to the specified buffer pool + */ +static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) +{ + struct device *dev = priv->net_dev->dev.parent; + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + void *buf; + dma_addr_t addr; + int i, err; + + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { + /* Allocate buffer visible to WRIOP + skb shared info + + * alignment padding. + */ + buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv)); + if (unlikely(!buf)) + goto err_alloc; + + buf = PTR_ALIGN(buf, priv->rx_buf_align); + + addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; + + buf_array[i] = addr; + + /* tracing point */ + trace_dpaa2_eth_buf_seed(priv->net_dev, + buf, DPAA2_ETH_BUF_RAW_SIZE(priv), + addr, DPAA2_ETH_RX_BUF_SIZE, + bpid); + } + +release_bufs: + /* In case the portal is busy, retry until successful */ + while ((err = dpaa2_io_service_release(NULL, bpid, + buf_array, i)) == -EBUSY) + cpu_relax(); + + /* If release command failed, clean up and bail out; not much + * else we can do about it + */ + if (unlikely(err)) { + free_bufs(priv, buf_array, i); + return 0; + } + + return i; + +err_map: + put_page(virt_to_head_page(buf)); +err_alloc: + /* If we managed to allocate at least some buffers, release them */ + if (i) + goto release_bufs; + + return 0; +} + +static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) +{ + int i, j; + int new_count; + + /* This is the lazy seeding of Rx buffer pools. + * add_bufs() is also used on the Rx hotpath and calls + * napi_alloc_frag(). The trouble with that is that it in turn ends up + * calling this_cpu_ptr(), which mandates execution in atomic context. + * Rather than splitting up the code, do a one-off preempt disable. + */ + preempt_disable(); + for (j = 0; j < priv->num_channels; j++) { + priv->channel[j]->buf_count = 0; + for (i = 0; i < priv->num_bufs; + i += DPAA2_ETH_BUFS_PER_CMD) { + new_count = add_bufs(priv, bpid); + priv->channel[j]->buf_count += new_count; + + if (new_count < DPAA2_ETH_BUFS_PER_CMD) { + preempt_enable(); + return -ENOMEM; + } + } + } + preempt_enable(); + + return 0; +} + +/** + * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD + */ +static void drain_bufs(struct dpaa2_eth_priv *priv, int count) +{ + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + int ret; + + do { + ret = dpaa2_io_service_acquire(NULL, priv->bpid, + buf_array, count); + if (ret < 0) { + netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); + return; + } + free_bufs(priv, buf_array, ret); + } while (ret); +} + +static void drain_pool(struct dpaa2_eth_priv *priv) +{ + preempt_disable(); + drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); + drain_bufs(priv, 1); + preempt_enable(); +} + +/* Function is called from softirq context only, so we don't need to guard + * the access to percpu count + */ +static int refill_pool(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + u16 bpid) +{ + int new_count; + + if (likely(ch->buf_count >= priv->refill_thresh)) + return 0; + + do { + new_count = add_bufs(priv, bpid); + if (unlikely(!new_count)) { + /* Out of memory; abort for now, we'll try later on */ + break; + } + ch->buf_count += new_count; + } while (ch->buf_count < priv->num_bufs); + + if (unlikely(ch->buf_count < priv->num_bufs)) + return -ENOMEM; + + return 0; +} + +static int pull_channel(struct dpaa2_eth_channel *ch) +{ + int err; + int dequeues = -1; + + /* Retry while portal is busy */ + do { + err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store); + dequeues++; + cpu_relax(); + } while (err == -EBUSY); + + ch->stats.dequeue_portal_busy += dequeues; + if (unlikely(err)) + ch->stats.pull_err++; + + return err; +} + +/* NAPI poll routine + * + * Frames are dequeued from the QMan channel associated with this NAPI context. + * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx + * confirmation frames are limited by a threshold per NAPI poll cycle. + */ +static int dpaa2_eth_poll(struct napi_struct *napi, int budget) +{ + struct dpaa2_eth_channel *ch; + int rx_cleaned = 0, tx_conf_cleaned = 0; + bool store_cleaned; + struct dpaa2_eth_priv *priv; + int err; + + ch = container_of(napi, struct dpaa2_eth_channel, napi); + priv = ch->priv; + + do { + err = pull_channel(ch); + if (unlikely(err)) + break; + + /* Refill pool if appropriate */ + refill_pool(priv, ch, priv->bpid); + + store_cleaned = consume_frames(ch, &rx_cleaned, + &tx_conf_cleaned); + + /* If we've either consumed the budget with Rx frames, + * or reached the Tx conf threshold, we're done. + */ + if (rx_cleaned >= budget || + tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL) + return budget; + } while (store_cleaned); + + /* We didn't consume the entire budget, finish napi and + * re-enable data availability notifications.
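 + * As a worked example: with the default weight of NAPI_POLL_WEIGHT (64), + * a cycle that cleans, say, 20 Rx frames and fewer Tx conf frames than + * TX_CONF_PER_NAPI_POLL falls through to the rearm below and reports + * those 20 frames to NAPI.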
+ */ + napi_complete(napi); + do { + err = dpaa2_io_service_rearm(NULL, &ch->nctx); + cpu_relax(); + } while (err == -EBUSY); + + return max(rx_cleaned, 1); +} + +static void enable_ch_napi(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *ch; + int i; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + napi_enable(&ch->napi); + } +} + +static void disable_ch_napi(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *ch; + int i; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + napi_disable(&ch->napi); + } +} + +static int link_state_update(struct dpaa2_eth_priv *priv) +{ + struct dpni_link_state state; + int err; + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (unlikely(err)) { + netdev_err(priv->net_dev, + "dpni_get_link_state() failed\n"); + return err; + } + + /* Check link state; speed / duplex changes are not treated yet */ + if (priv->link_state.up == state.up) + return 0; + + priv->link_state = state; + if (state.up) { + netif_carrier_on(priv->net_dev); + netif_tx_start_all_queues(priv->net_dev); + } else { + netif_tx_stop_all_queues(priv->net_dev); + netif_carrier_off(priv->net_dev); + } + + netdev_info(priv->net_dev, "Link Event: state %s\n", + state.up ? "up" : "down"); + + return 0; +} + +static int dpaa2_eth_open(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err; + + /* We'll only start the txqs when the link is actually ready; make sure + * we don't race against the link up notification, which may come + * immediately after dpni_enable(); + */ + netif_tx_stop_all_queues(net_dev); + + /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will + * return true and cause 'ip link show' to report the LOWER_UP flag, + * even though the link notification wasn't even received. + */ + netif_carrier_off(net_dev); + + err = seed_pool(priv, priv->bpid); + if (err) { + /* Not much to do; the buffer pool, though not filled up, + * may still contain some buffers which would enable us + * to limp on. + */ + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", + priv->dpbp_dev->obj_desc.id, priv->bpid); + } + + if (priv->tx_pause_frames) + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD; + else + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD; + + err = dpni_enable(priv->mc_io, 0, priv->mc_token); + if (err < 0) { + netdev_err(net_dev, "dpni_enable() failed\n"); + goto enable_err; + } + + /* If the DPMAC object has already processed the link up interrupt, + * we have to learn the link state ourselves. + */ + err = link_state_update(priv); + if (err < 0) { + netdev_err(net_dev, "Can't update link state\n"); + goto link_state_err; + } + + return 0; + +link_state_err: +enable_err: + priv->refill_thresh = 0; + drain_pool(priv); + return err; +} + +static int dpaa2_eth_stop(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int dpni_enabled; + int retries = 10, i; + + netif_tx_stop_all_queues(net_dev); + netif_carrier_off(net_dev); + + /* Loop while dpni_disable() attempts to drain the egress FQs + * and confirm them back to us.
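 + * With retries = 10 and a 100 ms sleep per attempt, this gives the MC + * roughly one second to quiesce the DPNI before we give up.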
+ */ + do { + dpni_disable(priv->mc_io, 0, priv->mc_token); + dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); + if (dpni_enabled) + /* Allow the MC some slack */ + msleep(100); + } while (dpni_enabled && --retries); + if (!retries) { + netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); + /* Must go on and disable NAPI nonetheless, so we don't crash at + * the next "ifconfig up" + */ + } + + priv->refill_thresh = 0; + + /* Wait for all running napi poll routines to finish, so that no + * new refill operations are started. + */ + for (i = 0; i < priv->num_channels; i++) + napi_synchronize(&priv->channel[i]->napi); + + /* Empty the buffer pool */ + drain_pool(priv); + + return 0; +} + +static int dpaa2_eth_init(struct net_device *net_dev) +{ + u64 supported = 0; + u64 not_supported = 0; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + u32 options = priv->dpni_attrs.options; + + /* Capabilities listing */ + supported |= IFF_LIVE_ADDR_CHANGE; + + if (options & DPNI_OPT_NO_MAC_FILTER) + not_supported |= IFF_UNICAST_FLT; + else + supported |= IFF_UNICAST_FLT; + + net_dev->priv_flags |= supported; + net_dev->priv_flags &= ~not_supported; + + /* Features */ + net_dev->features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_LLTX; + net_dev->hw_features = net_dev->features; + + return 0; +} + +static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + int err; + + err = eth_mac_addr(net_dev, addr); + if (err < 0) { + dev_err(dev, "eth_mac_addr() failed (%d)\n", err); + return err; + } + + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, + net_dev->dev_addr); + if (err) { + dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); + return err; + } + + return 0; +} + +/** Fill in counters maintained by the GPP driver. These may be different from + * the hardware counters obtained by ethtool. + */ +static struct rtnl_link_stats64 *dpaa2_eth_get_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct rtnl_link_stats64 *percpu_stats; + u64 *cpustats; + u64 *netstats = (u64 *)stats; + int i, j; + int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); + + for_each_possible_cpu(i) { + percpu_stats = per_cpu_ptr(priv->percpu_stats, i); + cpustats = (u64 *)percpu_stats; + for (j = 0; j < num; j++) + netstats[j] += cpustats[j]; + } + return stats; +} + +static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err; + + /* Set the maximum Rx frame length to match the transmit side; + * account for L2 headers when computing the MFL + */ + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, + (u16)DPAA2_ETH_L2_MAX_FRM(mtu)); + if (err) { + netdev_err(net_dev, "dpni_set_max_frame_length() failed\n"); + return err; + } + + net_dev->mtu = mtu; + return 0; +} + +/* Copy mac unicast addresses from @net_dev to @priv. + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
+ */ +static void add_uc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) +{ + struct netdev_hw_addr *ha; + int err; + + netdev_for_each_uc_addr(ha, net_dev) { + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, + ha->addr); + if (err) + netdev_warn(priv->net_dev, + "Could not add ucast MAC %pM to the filtering table (err %d)\n", + ha->addr, err); + } +} + +/* Copy mac multicast addresses from @net_dev to @priv + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. + */ +static void add_mc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) +{ + struct netdev_hw_addr *ha; + int err; + + netdev_for_each_mc_addr(ha, net_dev) { + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, + ha->addr); + if (err) + netdev_warn(priv->net_dev, + "Could not add mcast MAC %pM to the filtering table (err %d)\n", + ha->addr, err); + } +} + +static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int uc_count = netdev_uc_count(net_dev); + int mc_count = netdev_mc_count(net_dev); + u8 max_mac = priv->dpni_attrs.mac_filter_entries; + u32 options = priv->dpni_attrs.options; + u16 mc_token = priv->mc_token; + struct fsl_mc_io *mc_io = priv->mc_io; + int err; + + /* Basic sanity checks; these probably indicate a misconfiguration */ + if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) + netdev_info(net_dev, + "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", + max_mac); + + /* Force promiscuous if the uc or mc counts exceed our capabilities. */ + if (uc_count > max_mac) { + netdev_info(net_dev, + "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", + uc_count, max_mac); + goto force_promisc; + } + if (mc_count + uc_count > max_mac) { + netdev_info(net_dev, + "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n", + uc_count + mc_count, max_mac); + goto force_mc_promisc; + } + + /* Adjust promisc settings due to flag combinations */ + if (net_dev->flags & IFF_PROMISC) + goto force_promisc; + if (net_dev->flags & IFF_ALLMULTI) { + /* First, rebuild unicast filtering table. This should be done + * in promisc mode, in order to avoid frame loss while we + * progressively add entries to the table. + * We don't know whether we had been in promisc already, and + * making an MC call to find out is expensive; so set uc promisc + * nonetheless. + */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set uc promisc\n"); + + /* Actual uc table reconstruction. */ + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); + if (err) + netdev_warn(net_dev, "Can't clear uc filters\n"); + add_uc_hw_addr(net_dev, priv); + + /* Finally, clear uc promisc and set mc promisc as requested. */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); + if (err) + netdev_warn(net_dev, "Can't clear uc promisc\n"); + goto force_mc_promisc; + } + + /* Neither unicast, nor multicast promisc will be on... eventually. + * For now, rebuild mac filtering tables while forcing both of them on. 
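 + * The sequence below (force promisc on, clear filters, re-add addresses, + * then clear promisc) leaves no window in which legitimate frames could + * be dropped for lack of a filter entry.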
+ */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); + + /* Actual mac filtering tables reconstruction */ + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); + if (err) + netdev_warn(net_dev, "Can't clear mac filters\n"); + add_mc_hw_addr(net_dev, priv); + add_uc_hw_addr(net_dev, priv); + + /* Now we can clear both ucast and mcast promisc, without risking + * to drop legitimate frames anymore. + */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); + if (err) + netdev_warn(net_dev, "Can't clear ucast promisc\n"); + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); + if (err) + netdev_warn(net_dev, "Can't clear mcast promisc\n"); + + return; + +force_promisc: + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set ucast promisc\n"); +force_mc_promisc: + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set mcast promisc\n"); +} + +static int dpaa2_eth_set_features(struct net_device *net_dev, + netdev_features_t features) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + netdev_features_t changed = features ^ net_dev->features; + bool enable; + int err; + + if (changed & NETIF_F_RXCSUM) { + enable = !!(features & NETIF_F_RXCSUM); + err = set_rx_csum(priv, enable); + if (err) + return err; + } + + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { + enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); + err = set_tx_csum(priv, enable); + if (err) + return err; + } + + return 0; +} + +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->ts_tx_en = false; + break; + case HWTSTAMP_TX_ON: + priv->ts_tx_en = true; + break; + default: + return -ERANGE; + } + + if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + priv->ts_rx_en = false; + } else { + priv->ts_rx_en = true; + /* TS is set for all frame types, not only those requested */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + if (cmd == SIOCSHWTSTAMP) + return dpaa2_eth_ts_ioctl(dev, rq, cmd); + + return -EINVAL; +} + +static const struct net_device_ops dpaa2_eth_ops = { + .ndo_open = dpaa2_eth_open, + .ndo_start_xmit = dpaa2_eth_tx, + .ndo_stop = dpaa2_eth_stop, + .ndo_init = dpaa2_eth_init, + .ndo_set_mac_address = dpaa2_eth_set_addr, + .ndo_get_stats64 = dpaa2_eth_get_stats, + .ndo_change_mtu = dpaa2_eth_change_mtu, + .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, + .ndo_set_features = dpaa2_eth_set_features, + .ndo_do_ioctl = dpaa2_eth_ioctl, +}; + +static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) +{ + struct dpaa2_eth_channel *ch; + + ch = container_of(ctx, struct dpaa2_eth_channel, nctx); + + /* Update NAPI statistics */ + ch->stats.cdan++; + + napi_schedule_irqoff(&ch->napi); +} + +/* Allocate and configure a DPCON object */ +static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) +{ + struct fsl_mc_device *dpcon; + struct device *dev = priv->net_dev->dev.parent; + struct dpcon_attr attrs; + int err; + + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), + FSL_MC_POOL_DPCON, &dpcon); + if (err) { + dev_info(dev, "Not enough DPCONs, will go on as-is\n"); + return NULL; + } + + err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_open() failed\n"); + goto err_open; + } + + err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_reset() failed\n"); + goto err_reset; + } + + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); + if (err) { + dev_err(dev, "dpcon_get_attributes() failed\n"); + goto err_get_attr; + } + + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_enable() failed\n"); + goto err_enable; + } + + return dpcon; + +err_enable: +err_get_attr: +err_reset: + dpcon_close(priv->mc_io, 0, dpcon->mc_handle); +err_open: + fsl_mc_object_free(dpcon); + + return NULL; +} + +static void free_dpcon(struct dpaa2_eth_priv *priv, + struct fsl_mc_device *dpcon) +{ + dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); + dpcon_close(priv->mc_io, 0, dpcon->mc_handle); + fsl_mc_object_free(dpcon); +} + +static struct dpaa2_eth_channel * +alloc_channel(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *channel; + struct dpcon_attr attr; + struct device *dev = priv->net_dev->dev.parent; + int err; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + channel->dpcon = setup_dpcon(priv); + if (!channel->dpcon) + goto err_setup; + + err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, + &attr); + if (err) { + dev_err(dev, "dpcon_get_attributes() failed\n"); + goto err_get_attr; + } + + channel->dpcon_id = attr.id; + channel->ch_id = attr.qbman_ch_id; + channel->priv = priv; + + return channel; + +err_get_attr: + free_dpcon(priv, channel->dpcon); +err_setup: + kfree(channel); + return NULL; +} + +static void free_channel(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *channel) +{ + free_dpcon(priv, channel->dpcon); + kfree(channel); +} + +/* DPIO setup: allocate and configure QBMan channels, setup core affinity + * and register data availability notifications + */ +static int setup_dpio(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_io_notification_ctx *nctx; + struct dpaa2_eth_channel *channel; + struct dpcon_notification_cfg dpcon_notif_cfg; + struct device *dev = priv->net_dev->dev.parent; 
+ int i, err; + + /* We want the ability to spread ingress traffic (RX, TX conf) to as + * many cores as possible, so we need one channel for each core + * (unless there are fewer queues than cores, in which case the extra + * channels would be wasted). + * Allocate one channel per core and register it to the core's + * affine DPIO. If not enough channels are available for all cores + * or if some cores don't have an affine DPIO, there will be no + * ingress frame processing on those cores. + */ + cpumask_clear(&priv->dpio_cpumask); + for_each_online_cpu(i) { + /* Try to allocate a channel */ + channel = alloc_channel(priv); + if (!channel) { + dev_info(dev, + "No affine channel for cpu %d and above\n", i); + goto err_alloc_ch; + } + + priv->channel[priv->num_channels] = channel; + + nctx = &channel->nctx; + nctx->is_cdan = 1; + nctx->cb = cdan_cb; + nctx->id = channel->ch_id; + nctx->desired_cpu = i; + + /* Register the new context */ + err = dpaa2_io_service_register(NULL, nctx); + if (err) { + dev_dbg(dev, "No affine DPIO for cpu %d\n", i); + /* If no affine DPIO for this core, there's probably + * none available for next cores either. + */ + goto err_service_reg; + } + + /* Register DPCON notification with MC */ + dpcon_notif_cfg.dpio_id = nctx->dpio_id; + dpcon_notif_cfg.priority = 0; + dpcon_notif_cfg.user_ctx = nctx->qman64; + err = dpcon_set_notification(priv->mc_io, 0, + channel->dpcon->mc_handle, + &dpcon_notif_cfg); + if (err) { + dev_err(dev, "dpcon_set_notification() failed\n"); + goto err_set_cdan; + } + + /* If we managed to allocate a channel and also found an affine + * DPIO for this core, add it to the final mask + */ + cpumask_set_cpu(i, &priv->dpio_cpumask); + priv->num_channels++; + + /* Stop if we already have enough channels to accommodate all + * RX and TX conf queues + */ + if (priv->num_channels == dpaa2_eth_queue_count(priv)) + break; + } + + /* Tx confirmation queues can only be serviced by cpus + * with an affine DPIO/channel + */ + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); + + return 0; + +err_set_cdan: + dpaa2_io_service_deregister(NULL, nctx); +err_service_reg: + free_channel(priv, channel); +err_alloc_ch: + if (cpumask_empty(&priv->dpio_cpumask)) { + dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n"); + return -ENODEV; + } + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); + + dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", + cpumask_pr_args(&priv->dpio_cpumask)); + + return 0; +} + +static void free_dpio(struct dpaa2_eth_priv *priv) +{ + int i; + struct dpaa2_eth_channel *ch; + + /* deregister CDAN notifications and free channels */ + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + dpaa2_io_service_deregister(NULL, &ch->nctx); + free_channel(priv, ch); + } +} + +static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, + int cpu) +{ + struct device *dev = priv->net_dev->dev.parent; + int i; + + for (i = 0; i < priv->num_channels; i++) + if (priv->channel[i]->nctx.desired_cpu == cpu) + return priv->channel[i]; + + /* We should never get here.
Issue a warning and return + * the first channel, because it's still better than nothing + */ + dev_warn(dev, "No affine channel found for cpu %d\n", cpu); + + return priv->channel[0]; +} + +static void set_fq_affinity(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + struct cpumask xps_mask = CPU_MASK_NONE; + struct dpaa2_eth_fq *fq; + int rx_cpu, txc_cpu; + int i, err; + + /* For each FQ, pick one channel/CPU to deliver frames to. + * This may well change at runtime, either through irqbalance or + * through direct user intervention. + */ + rx_cpu = cpumask_first(&priv->dpio_cpumask); + txc_cpu = cpumask_first(&priv->txconf_cpumask); + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + switch (fq->type) { + case DPAA2_RX_FQ: + case DPAA2_RX_ERR_FQ: + fq->target_cpu = rx_cpu; + rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); + if (rx_cpu >= nr_cpu_ids) + rx_cpu = cpumask_first(&priv->dpio_cpumask); + break; + case DPAA2_TX_CONF_FQ: + fq->target_cpu = txc_cpu; + + /* register txc_cpu to XPS */ + cpumask_set_cpu(txc_cpu, &xps_mask); + err = netif_set_xps_queue(priv->net_dev, &xps_mask, + fq->flowid); + if (err) + dev_info_once(dev, + "Tx: error setting XPS queue\n"); + cpumask_clear_cpu(txc_cpu, &xps_mask); + + txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask); + if (txc_cpu >= nr_cpu_ids) + txc_cpu = cpumask_first(&priv->txconf_cpumask); + break; + default: + dev_err(dev, "Unknown FQ type: %d\n", fq->type); + } + fq->channel = get_affine_channel(priv, fq->target_cpu); + } +} + +static void setup_fqs(struct dpaa2_eth_priv *priv) +{ + int i, j; + + /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the + * beginning of the queue array. + * Number of Rx and Tx queues are the same. + * We only support one traffic class for now. 
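 + * As an illustration, with two queues and one traffic class this yields: + * fq[0..1] = Tx conf flows 0..1, fq[2..3] = Rx flows 0..1, optionally + * followed by a single Rx error FQ.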
+ */ + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { + priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; + priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; + priv->fq[priv->num_fqs++].flowid = (u16)i; + } + + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) + for (j = 0; j < dpaa2_eth_queue_count(priv); j++) { + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; + priv->fq[priv->num_fqs].tc = (u8)i; + priv->fq[priv->num_fqs++].flowid = (u16)j; + } + +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE + /* We have exactly one Rx error queue per DPNI */ + priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; + priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; +#endif + + /* For each FQ, decide on which core to process incoming frames */ + set_fq_affinity(priv); +} + +/* Allocate and configure one buffer pool for each interface */ +static int setup_dpbp(struct dpaa2_eth_priv *priv) +{ + int err; + struct fsl_mc_device *dpbp_dev; + struct dpbp_attr dpbp_attrs; + struct device *dev = priv->net_dev->dev.parent; + + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, + &dpbp_dev); + if (err) { + dev_err(dev, "DPBP device allocation failed\n"); + return err; + } + + priv->dpbp_dev = dpbp_dev; + + err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, + &dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_open() failed\n"); + goto err_open; + } + + err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_reset() failed\n"); + goto err_reset; + } + + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_enable() failed\n"); + goto err_enable; + } + + err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, + &dpbp_attrs); + if (err) { + dev_err(dev, "dpbp_get_attributes() failed\n"); + goto err_get_attr; + } + + priv->bpid = dpbp_attrs.bpid; + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels; + + return 0; + +err_get_attr: + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); +err_enable: +err_reset: + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); +err_open: + fsl_mc_object_free(dpbp_dev); + + return err; +} + +static void free_dpbp(struct dpaa2_eth_priv *priv) +{ + drain_pool(priv); + dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); + dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); + fsl_mc_object_free(priv->dpbp_dev); +} + +static int setup_tx_congestion(struct dpaa2_eth_priv *priv) +{ + struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 }; + struct device *dev = priv->net_dev->dev.parent; + int err; + + priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN, + GFP_KERNEL); + if (!priv->cscn_unaligned) + return -ENOMEM; + + priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN); + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, priv->cscn_dma)) { + dev_err(dev, "Error mapping CSCN memory area\n"); + err = -ENOMEM; + goto err_dma_map; + } + + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES; + cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH; + cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH; + cong_notif_cfg.message_ctx = (u64)priv; + cong_notif_cfg.message_iova = priv->cscn_dma; + cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | + DPNI_CONG_OPT_COHERENT_WRITE; + err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token, + 
DPNI_QUEUE_TX, 0, + &cong_notif_cfg); + if (err) { + dev_err(dev, "dpni_set_congestion_notification failed\n"); + goto err_set_cong; + } + + return 0; + +err_set_cong: + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); +err_dma_map: + kfree(priv->cscn_unaligned); + + return err; +} + +/* Configure the DPNI object this interface is associated with */ +static int setup_dpni(struct fsl_mc_device *ls_dev) +{ + struct device *dev = &ls_dev->dev; + struct dpaa2_eth_priv *priv; + struct net_device *net_dev; + struct dpni_buffer_layout buf_layout; + struct dpni_link_cfg cfg = {0}; + int err; + + net_dev = dev_get_drvdata(dev); + priv = netdev_priv(net_dev); + + priv->dpni_id = ls_dev->obj_desc.id; + + /* get a handle for the DPNI object */ + err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token); + if (err) { + dev_err(dev, "dpni_open() failed\n"); + goto err_open; + } + + ls_dev->mc_io = priv->mc_io; + ls_dev->mc_handle = priv->mc_token; + + err = dpni_reset(priv->mc_io, 0, priv->mc_token); + if (err) { + dev_err(dev, "dpni_reset() failed\n"); + goto err_reset; + } + + err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, + &priv->dpni_attrs); + + if (err) { + dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); + goto err_get_attr; + } + + /* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf + * align value must be a multiple of 256. + */ + priv->rx_buf_align = + priv->dpni_attrs.wriop_version & 0x3ff ? + DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1; + + /* Update number of logical FQs in netdev */ + err = netif_set_real_num_tx_queues(net_dev, + dpaa2_eth_queue_count(priv)); + if (err) { + dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err); + goto err_set_tx_queues; + } + + err = netif_set_real_num_rx_queues(net_dev, + dpaa2_eth_queue_count(priv)); + if (err) { + dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err); + goto err_set_rx_queues; + } + + /* Configure buffer layouts */ + /* rx buffer */ + buf_layout.pass_parser_result = true; + buf_layout.pass_frame_status = true; + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; + buf_layout.data_align = priv->rx_buf_align; + buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM; + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, &buf_layout); + if (err) { + dev_err(dev, + "dpni_set_buffer_layout(RX) failed\n"); + goto err_buf_layout; + } + + /* tx buffer */ + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP | + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; + buf_layout.pass_timestamp = true; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, &buf_layout); + if (err) { + dev_err(dev, + "dpni_set_buffer_layout(TX) failed\n"); + goto err_buf_layout; + } + + /* tx-confirm buffer */ + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX_CONFIRM, &buf_layout); + if (err) { + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); + goto err_buf_layout; + } + + /* Now that we've set our tx buffer layout, retrieve the minimum + * required tx data offset. 
+ */ + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, + &priv->tx_data_offset); + if (err) { + dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err); + goto err_data_offset; + } + + if ((priv->tx_data_offset % 64) != 0) + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", + priv->tx_data_offset); + + /* Enable congestion notifications for Tx queues */ + err = setup_tx_congestion(priv); + if (err) + goto err_tx_cong; + + /* allocate classification rule space */ + priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) * + dpaa2_eth_fs_count(priv), GFP_KERNEL); + if (!priv->cls_rule) { + err = -ENOMEM; + goto err_cls_rule; + } + + /* Enable flow control */ + cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE; + priv->tx_pause_frames = 1; + + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); + if (err) { + netdev_err(net_dev, "ERROR %d setting link cfg\n", err); + goto err_set_link_cfg; + } + + return 0; + +err_set_link_cfg: +err_cls_rule: +err_tx_cong: +err_data_offset: +err_buf_layout: +err_set_rx_queues: +err_set_tx_queues: +err_get_attr: +err_reset: + dpni_close(priv->mc_io, 0, priv->mc_token); +err_open: + return err; +} + +static void free_dpni(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + int err; + + err = dpni_reset(priv->mc_io, 0, priv->mc_token); + if (err) + netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", + err); + + dpni_close(priv->mc_io, 0, priv->mc_token); + + kfree(priv->cls_rule); + + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); + kfree(priv->cscn_unaligned); +} + +static int set_queue_taildrop(struct dpaa2_eth_priv *priv, + struct dpni_taildrop *td) +{ + struct device *dev = priv->net_dev->dev.parent; + int err, i; + + for (i = 0; i < priv->num_fqs; i++) { + if (priv->fq[i].type != DPAA2_RX_FQ) + continue; + + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, + DPNI_CP_QUEUE, DPNI_QUEUE_RX, + priv->fq[i].tc, priv->fq[i].flowid, + td); + if (err) { + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err); + return err; + } + } + + return 0; +} + +static int set_group_taildrop(struct dpaa2_eth_priv *priv, + struct dpni_taildrop *td) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_taildrop disable_td, *tc_td; + int i, err; + + memset(&disable_td, 0, sizeof(struct dpni_taildrop)); + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { + if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i)) + /* Do not set taildrop thresholds for PFC-enabled + * traffic classes. We will enable congestion + * notifications for them. + */ + tc_td = &disable_td; + else + tc_td = td; + + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, + DPNI_CP_GROUP, DPNI_QUEUE_RX, + i, 0, tc_td); + if (err) { + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err); + return err; + } + } + return 0; +} + +/* Enable/disable Rx FQ taildrop + * + * Rx FQ taildrop is mutually exclusive with flow control and it only gets + * disabled when FC is active. Depending on FC status, we need to compute + * the maximum number of buffers in the pool differently, so use the + * opportunity to update max number of buffers as well.
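 + * In short: with no taildrop (flow control on) we keep the large FC + * buffer count; per-queue byte taildrop uses DPAA2_ETH_NUM_BUFS_TD; + * per-group frame taildrop sizes both the threshold and the pool from + * the NAPI weight, per the switch statement below.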
+ */ +int set_rx_taildrop(struct dpaa2_eth_priv *priv) +{ + enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv); + struct dpni_taildrop td_queue, td_group; + int err = 0; + + switch (cfg) { + case DPAA2_ETH_TD_NONE: + memset(&td_queue, 0, sizeof(struct dpni_taildrop)); + memset(&td_group, 0, sizeof(struct dpni_taildrop)); + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / + priv->num_channels; + break; + case DPAA2_ETH_TD_QUEUE: + memset(&td_group, 0, sizeof(struct dpni_taildrop)); + td_queue.enable = 1; + td_queue.units = DPNI_CONGESTION_UNIT_BYTES; + td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH / + dpaa2_eth_tc_count(priv); + priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD; + break; + case DPAA2_ETH_TD_GROUP: + memset(&td_queue, 0, sizeof(struct dpni_taildrop)); + td_group.enable = 1; + td_group.units = DPNI_CONGESTION_UNIT_FRAMES; + td_group.threshold = NAPI_POLL_WEIGHT * + dpaa2_eth_queue_count(priv); + priv->num_bufs = NAPI_POLL_WEIGHT * + dpaa2_eth_tc_count(priv); + break; + default: + break; + } + + err = set_queue_taildrop(priv, &td_queue); + if (err) + return err; + + err = set_group_taildrop(priv, &td_group); + if (err) + return err; + + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD; + + return 0; +} + +static int setup_rx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_queue q = { { 0 } }; + struct dpni_queue_id qid; + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; + int err; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, fq->tc, fq->flowid, &q, &qid); + if (err) { + dev_err(dev, "dpni_get_queue() failed (%d)\n", err); + return err; + } + + fq->fqid = qid.fqid; + + q.destination.id = fq->channel->dpcon_id; + q.destination.type = DPNI_DEST_DPCON; + q.destination.priority = 1; + q.user_context = (u64)fq; + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, fq->tc, fq->flowid, q_opt, &q); + if (err) { + dev_err(dev, "dpni_set_queue() failed (%d)\n", err); + return err; + } + + return 0; +} + +static int setup_tx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_queue q = { { 0 } }; + struct dpni_queue_id qid; + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; + int err; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid); + if (err) { + dev_err(dev, "dpni_get_queue() failed (%d)\n", err); + return err; + } + + fq->tx_qdbin = qid.qdbin; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid); + if (err) { + dev_err(dev, "dpni_get_queue() failed (%d)\n", err); + return err; + } + + fq->fqid = qid.fqid; + + q.destination.id = fq->channel->dpcon_id; + q.destination.type = DPNI_DEST_DPCON; + q.destination.priority = 0; + q.user_context = (u64)fq; + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q); + if (err) { + dev_err(dev, "dpni_set_queue() failed (%d)\n", err); + return err; + } + + return 0; +} + +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE +static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_queue q = { { 0 } }; + struct dpni_queue_id qid; + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; + int err; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid); + if
(err) { + dev_err(dev, "dpni_get_queue() failed (%d)\n", err); + return err; + } + + fq->fqid = qid.fqid; + + q.destination.id = fq->channel->dpcon_id; + q.destination.type = DPNI_DEST_DPCON; + q.destination.priority = 1; + q.user_context = (u64)fq; + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q); + if (err) { + dev_err(dev, "dpni_set_queue() failed (%d)\n", err); + return err; + } + + return 0; +} +#endif + +/* default hash key fields */ +static struct dpaa2_eth_hash_fields default_hash_fields[] = { + { + /* L2 header */ + .rxnfc_field = RXH_L2DA, + .cls_prot = NET_PROT_ETH, + .cls_field = NH_FLD_ETH_DA, + .size = 6, + }, { + .cls_prot = NET_PROT_ETH, + .cls_field = NH_FLD_ETH_SA, + .size = 6, + }, { + /* This is the last ethertype field parsed: + * depending on frame format, it can be the MAC ethertype + * or the VLAN etype. + */ + .cls_prot = NET_PROT_ETH, + .cls_field = NH_FLD_ETH_TYPE, + .size = 2, + }, { + /* VLAN header */ + .rxnfc_field = RXH_VLAN, + .cls_prot = NET_PROT_VLAN, + .cls_field = NH_FLD_VLAN_TCI, + .size = 2, + }, { + /* IP header */ + .rxnfc_field = RXH_IP_SRC, + .cls_prot = NET_PROT_IP, + .cls_field = NH_FLD_IP_SRC, + .size = 4, + }, { + .rxnfc_field = RXH_IP_DST, + .cls_prot = NET_PROT_IP, + .cls_field = NH_FLD_IP_DST, + .size = 4, + }, { + .rxnfc_field = RXH_L3_PROTO, + .cls_prot = NET_PROT_IP, + .cls_field = NH_FLD_IP_PROTO, + .size = 1, + }, { + /* Using UDP ports, this is functionally equivalent to raw + * byte pairs from L4 header. + */ + .rxnfc_field = RXH_L4_B_0_1, + .cls_prot = NET_PROT_UDP, + .cls_field = NH_FLD_UDP_PORT_SRC, + .size = 2, + }, { + .rxnfc_field = RXH_L4_B_2_3, + .cls_prot = NET_PROT_UDP, + .cls_field = NH_FLD_UDP_PORT_DST, + .size = 2, + }, +}; + +/* Set RX hash options */ +static int set_hash(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpkg_profile_cfg cls_cfg; + struct dpni_rx_tc_dist_cfg dist_cfg; + u8 *dma_mem; + int i; + int err = 0; + + memset(&cls_cfg, 0, sizeof(cls_cfg)); + + for (i = 0; i < priv->num_hash_fields; i++) { + struct dpkg_extract *key = + &cls_cfg.extracts[cls_cfg.num_extracts]; + + key->type = DPKG_EXTRACT_FROM_HDR; + key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot; + key->extract.from_hdr.type = DPKG_FULL_FIELD; + key->extract.from_hdr.field = priv->hash_fields[i].cls_field; + cls_cfg.num_extracts++; + + priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field; + } + + dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); + if (!dma_mem) + return -ENOMEM; + + err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); + if (err) { + dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err); + goto err_prep_key; + } + + memset(&dist_cfg, 0, sizeof(dist_cfg)); + + /* Prepare for setting the rx dist */ + dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, + DPAA2_CLASSIFIER_DMA_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { + dev_err(dev, "DMA mapping failed\n"); + err = -ENOMEM; + goto err_dma_map; + } + + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); + if (dpaa2_eth_fs_enabled(priv)) { + dist_cfg.dist_mode = DPNI_DIST_MODE_FS; + dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; + } else { + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; + } + + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i, + &dist_cfg); + if (err) + break; + } + + dma_unmap_single(dev, dist_cfg.key_cfg_iova, + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); + if 
(err) + dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err); + +err_dma_map: +err_prep_key: + kfree(dma_mem); + return err; +} + +/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, + * frame queues and channels + */ +static int bind_dpni(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + struct dpni_pools_cfg pools_params; + struct dpni_error_cfg err_cfg; + int err = 0; + int i; + + pools_params.num_dpbp = 1; + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; + pools_params.pools[0].backup_pool = 0; + pools_params.pools[0].priority_mask = 0xff; + pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) { + dev_err(dev, "dpni_set_pools() failed\n"); + return err; + } + + /* Verify classification options and disable hashing and/or + * flow steering support in case of invalid configuration values + */ + priv->hash_fields = default_hash_fields; + priv->num_hash_fields = ARRAY_SIZE(default_hash_fields); + check_cls_support(priv); + + /* have the interface implicitly distribute traffic based on + * a static hash key + */ + if (dpaa2_eth_hash_enabled(priv)) { + err = set_hash(priv); + if (err) { + dev_err(dev, "Hashing configuration failed\n"); + return err; + } + } + + /* Configure handling of error frames */ + err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; + err_cfg.set_frame_annotation = 1; +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE + err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; +#else + err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; +#endif + err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, + &err_cfg); + if (err) { + dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err); + return err; + } + + /* Configure Rx and Tx conf queues to generate CDANs */ + for (i = 0; i < priv->num_fqs; i++) { + switch (priv->fq[i].type) { + case DPAA2_RX_FQ: + err = setup_rx_flow(priv, &priv->fq[i]); + break; + case DPAA2_TX_CONF_FQ: + err = setup_tx_flow(priv, &priv->fq[i]); + break; +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE + case DPAA2_RX_ERR_FQ: + err = setup_rx_err_flow(priv, &priv->fq[i]); + break; +#endif + default: + dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); + return -EINVAL; + } + if (err) + return err; + } + + err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX, + &priv->tx_qdid); + if (err) { + dev_err(dev, "dpni_get_qdid() failed\n"); + return err; + } + + return 0; +} + +/* Allocate rings for storing incoming frame descriptors */ +static int alloc_rings(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + int i; + + for (i = 0; i < priv->num_channels; i++) { + priv->channel[i]->store = + dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); + if (!priv->channel[i]->store) { + netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); + goto err_ring; + } + } + + return 0; + +err_ring: + for (i = 0; i < priv->num_channels; i++) { + if (!priv->channel[i]->store) + break; + dpaa2_io_store_destroy(priv->channel[i]->store); + } + + return -ENOMEM; +} + +static void free_rings(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_channels; i++) + dpaa2_io_store_destroy(priv->channel[i]->store); +} + +static int netdev_init(struct net_device *net_dev) +{ + int err; + struct device *dev = net_dev->dev.parent; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + u8 
mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN]; + u8 bcast_addr[ETH_ALEN]; + u16 rx_headroom, rx_req_headroom; + + net_dev->netdev_ops = &dpaa2_eth_ops; + + /* Get firmware address, if any */ + err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); + if (err) { + dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err); + return err; + } + + /* Get the MAC address currently set in the DPNI attributes, if any */ + err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, + dpni_mac_addr); + if (err) { + dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err); + return err; + } + + /* First check if firmware has any address configured by bootloader */ + if (!is_zero_ether_addr(mac_addr)) { + /* If the DPMAC addr != the DPNI addr, update it */ + if (!ether_addr_equal(mac_addr, dpni_mac_addr)) { + err = dpni_set_primary_mac_addr(priv->mc_io, 0, + priv->mc_token, + mac_addr); + if (err) { + dev_err(dev, + "dpni_set_primary_mac_addr() failed (%d)\n", + err); + return err; + } + } + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + } else if (is_zero_ether_addr(dpni_mac_addr)) { + /* Fills in net_dev->dev_addr, as required by + * register_netdevice() + */ + eth_hw_addr_random(net_dev); + /* Make the user aware, without cluttering the boot log */ + dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); + err = dpni_set_primary_mac_addr(priv->mc_io, 0, + priv->mc_token, net_dev->dev_addr); + if (err) { + dev_err(dev, + "dpni_set_primary_mac_addr() failed (%d)\n", err); + return err; + } + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all + * practical purposes, this will be our "permanent" mac address, + * at least until the next reboot. This move will also permit + * register_netdevice() to properly fill up net_dev->perm_addr. + */ + net_dev->addr_assign_type = NET_ADDR_PERM; + /* If the DPNI primary address is non-zero, use that one */ + } else { + /* NET_ADDR_PERM is default, all we have to do is + * fill in the device addr. + */ + memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); + } + + /* Explicitly add the broadcast address to the MAC filtering table; + * the MC won't do that for us. + */ + eth_broadcast_addr(bcast_addr); + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); + if (err) { + dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err); + /* Won't return an error; at least, we'd have egress traffic */ + } + + /* Reserve enough space to align buffer as per hardware requirement; + * NOTE: priv->tx_data_offset MUST be initialized at this point. + */ + net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv); + + /* Set MTU limits */ + net_dev->min_mtu = 68; + net_dev->max_mtu = DPAA2_ETH_MAX_MTU; + + /* Required headroom for Rx skbs, to avoid reallocation on + * forwarding path.
+ */ + rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN; + rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE + + DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align); + if (rx_req_headroom > rx_headroom) + dev_info_once(dev, + "Required headroom (%d) greater than available (%d).\n" + "This will impact performance due to reallocations.\n", + rx_req_headroom, rx_headroom); + + /* Our .ndo_init will be called herein */ + err = register_netdev(net_dev); + if (err < 0) { + dev_err(dev, "register_netdev() failed (%d)\n", err); + return err; + } + + return 0; +} + +static int poll_link_state(void *arg) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; + int err; + + while (!kthread_should_stop()) { + err = link_state_update(priv); + if (unlikely(err)) + return err; + + msleep(DPAA2_ETH_LINK_STATE_REFRESH); + } + + return 0; +} + +static irqreturn_t dpni_irq0_handler(int irq_num, void *arg) +{ + return IRQ_WAKE_THREAD; +} + +static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) +{ + u32 status = 0, clear = 0; + struct device *dev = (struct device *)arg; + struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); + struct net_device *net_dev = dev_get_drvdata(dev); + int err; + + err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, + DPNI_IRQ_INDEX, &status); + if (unlikely(err)) { + netdev_err(net_dev, "Can't get irq status (err %d)", err); + clear = 0xffffffff; + goto out; + } + + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { + clear |= DPNI_IRQ_EVENT_LINK_CHANGED; + link_state_update(netdev_priv(net_dev)); + } + +out: + dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, + DPNI_IRQ_INDEX, clear); + return IRQ_HANDLED; +} + +static int setup_irqs(struct fsl_mc_device *ls_dev) +{ + int err = 0; + struct fsl_mc_device_irq *irq; + + err = fsl_mc_allocate_irqs(ls_dev); + if (err) { + dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); + return err; + } + + irq = ls_dev->irqs[0]; + err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, + dpni_irq0_handler, + dpni_irq0_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(&ls_dev->dev), &ls_dev->dev); + if (err < 0) { + dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err); + goto free_mc_irq; + } + + err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, + DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED); + if (err < 0) { + dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err); + goto free_irq; + } + + err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, + DPNI_IRQ_INDEX, 1); + if (err < 0) { + dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err); + goto free_irq; + } + + return 0; + +free_irq: + devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); +free_mc_irq: + fsl_mc_free_irqs(ls_dev); + + return err; +} + +static void add_ch_napi(struct dpaa2_eth_priv *priv) +{ + int i; + struct dpaa2_eth_channel *ch; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ + netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, + NAPI_POLL_WEIGHT); + } +} + +static void del_ch_napi(struct dpaa2_eth_priv *priv) +{ + int i; + struct dpaa2_eth_channel *ch; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + netif_napi_del(&ch->napi); + } +} + +/* SysFS support */ +static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); + /* No MC API 
for getting the shaping config. We're stateful. */ + struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg; + + return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size); +} + +static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int err, items; + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); + struct dpni_tx_shaping_cfg scfg; + + items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size); + if (items != 2) { + pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n"); + return -EINVAL; + } + /* Size restriction as per MC API documentation */ + if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) { + pr_err("max_burst_size must be <= %d\n", + DPAA2_ETH_MAX_BURST_SIZE); + return -EINVAL; + } + + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg); + if (err) { + dev_err(dev, "dpni_set_tx_shaping() failed\n"); + return -EPERM; + } + /* If successful, save the current configuration for future inquiries */ + priv->shaping_cfg = scfg; + + return count; +} + +static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); + + return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask); +} + +static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); + struct dpaa2_eth_fq *fq; + bool running = netif_running(priv->net_dev); + int i, err; + + err = cpulist_parse(buf, &priv->txconf_cpumask); + if (err) + return err; + + /* Only accept CPUs that have an affine DPIO */ + if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) { + netdev_info(priv->net_dev, + "cpumask must be a subset of 0x%lx\n", + *cpumask_bits(&priv->dpio_cpumask)); + cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask, + &priv->txconf_cpumask); + } + + /* Rewiring the TxConf FQs requires interface shutdown. + */ + if (running) { + err = dpaa2_eth_stop(priv->net_dev); + if (err) + return -ENODEV; + } + + /* Set the new TxConf FQ affinities */ + set_fq_affinity(priv); + + /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit + * link up notification is received. Give the polling thread enough time + * to detect the link state change, or else we'll end up with the + * transmission side forever shut down. 
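+	 * The 2x factor in the msleep() below guarantees the polling thread
+	 * completes at least one full DPAA2_ETH_LINK_STATE_REFRESH pass,
+	 * even if an interval had only just started.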
+ */ + if (priv->do_link_poll) + msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH); + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + if (fq->type != DPAA2_TX_CONF_FQ) + continue; + setup_tx_flow(priv, fq); + } + + if (running) { + err = dpaa2_eth_open(priv->net_dev); + if (err) + return -ENODEV; + } + + return count; +} + +static struct device_attribute dpaa2_eth_attrs[] = { + __ATTR(txconf_cpumask, + 0600, + dpaa2_eth_show_txconf_cpumask, + dpaa2_eth_write_txconf_cpumask), + + __ATTR(tx_shaping, + 0600, + dpaa2_eth_show_tx_shaping, + dpaa2_eth_write_tx_shaping), +}; + +static void dpaa2_eth_sysfs_init(struct device *dev) +{ + int i, err; + + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) { + err = device_create_file(dev, &dpaa2_eth_attrs[i]); + if (err) { + dev_err(dev, "ERROR creating sysfs file\n"); + goto undo; + } + } + return; + +undo: + while (i > 0) + device_remove_file(dev, &dpaa2_eth_attrs[--i]); +} + +static void dpaa2_eth_sysfs_remove(struct device *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) + device_remove_file(dev, &dpaa2_eth_attrs[i]); +} + +#ifdef CONFIG_FSL_DPAA2_ETH_DCB +static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev, + struct ieee_pfc *pfc) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpni_congestion_notification_cfg notification_cfg; + struct dpni_link_state state; + int err, i; + + pfc->pfc_cap = dpaa2_eth_tc_count(priv); + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (err) { + netdev_err(net_dev, "ERROR %d getting link state", err); + return err; + } + + if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE)) + return 0; + + priv->pfc.pfc_en = 0; + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { + err = dpni_get_congestion_notification(priv->mc_io, 0, + priv->mc_token, + DPNI_QUEUE_RX, + i, ¬ification_cfg); + if (err) { + netdev_err(net_dev, "Error %d getting congestion notif", + err); + return err; + } + + if (notification_cfg.threshold_entry) + priv->pfc.pfc_en |= 1 << i; + } + + pfc->pfc_en = priv->pfc.pfc_en; + pfc->mbc = priv->pfc.mbc; + pfc->delay = priv->pfc.delay; + + return 0; +} + +/* Configure ingress classification based on VLAN PCP */ +static int set_vlan_qos(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpkg_profile_cfg kg_cfg = {0}; + struct dpni_qos_tbl_cfg qos_cfg = {0}; + struct dpni_rule_cfg key_params; + u8 *params_iova; + __be16 key, mask = cpu_to_be16(VLAN_PRIO_MASK); + int err = 0, i, j = 0; + + if (priv->vlan_clsf_set) + return 0; + + params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); + if (!params_iova) + return -ENOMEM; + + kg_cfg.num_extracts = 1; + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; + kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN; + kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; + kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI; + + err = dpni_prepare_key_cfg(&kg_cfg, params_iova); + if (err) { + dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err); + goto out_free; + } + + /* Set QoS table */ + qos_cfg.default_tc = 0; + qos_cfg.discard_on_miss = 0; + qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova, + DPAA2_CLASSIFIER_DMA_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) { + dev_err(dev, "%s: DMA mapping failed\n", __func__); + err = -ENOMEM; + goto out_free; + } + err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg); + dma_unmap_single(dev, qos_cfg.key_cfg_iova, + 
DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); + + if (err) { + dev_err(dev, "dpni_set_qos_table failed: %d\n", err); + goto out_free; + } + + key_params.key_size = sizeof(key); + + if (dpaa2_eth_fs_mask_enabled(priv)) { + key_params.mask_iova = dma_map_single(dev, &mask, sizeof(mask), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, key_params.mask_iova)) { + dev_err(dev, "DMA mapping failed %s\n", __func__); + err = -ENOMEM; + goto out_free; + } + } else { + key_params.mask_iova = 0; + } + + key_params.key_iova = dma_map_single(dev, &key, sizeof(key), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, key_params.key_iova)) { + dev_err(dev, "%s: DMA mapping failed\n", __func__); + err = -ENOMEM; + goto out_unmap_mask; + } + + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { + key = cpu_to_be16(i << VLAN_PRIO_SHIFT); + dma_sync_single_for_device(dev, key_params.key_iova, + sizeof(key), DMA_TO_DEVICE); + + err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, + &key_params, i, j++); + if (err) { + dev_err(dev, "dpni_add_qos_entry failed: %d\n", err); + goto out_unmap; + } + } + + priv->vlan_clsf_set = true; + +out_unmap: + dma_unmap_single(dev, key_params.key_iova, sizeof(key), DMA_TO_DEVICE); +out_unmap_mask: + if (key_params.mask_iova) + dma_unmap_single(dev, key_params.mask_iova, sizeof(mask), + DMA_TO_DEVICE); +out_free: + kfree(params_iova); + return err; +} + +static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev, + struct ieee_pfc *pfc) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpni_congestion_notification_cfg notification_cfg = {0}; + struct dpni_link_state state = {0}; + struct dpni_link_cfg cfg = {0}; + int err = 0, i; + + if (priv->pfc.pfc_en == pfc->pfc_en) + /* Same enabled mask, nothing to be done */ + return 0; + + err = set_vlan_qos(priv); + if (err) + return err; + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (err) { + netdev_err(net_dev, "ERROR %d getting link state", err); + return err; + } + + cfg.rate = state.rate; + cfg.options = state.options; + if (pfc->pfc_en) + cfg.options |= DPNI_LINK_OPT_PFC_PAUSE; + else + cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE; + + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); + if (err) { + netdev_err(net_dev, "ERROR %d setting link cfg", err); + return err; + } + + memcpy(&priv->pfc, pfc, sizeof(priv->pfc)); + + err = set_rx_taildrop(priv); + if (err) + return err; + + /* configure congestion notifications */ + notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL; + notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; + notification_cfg.message_iova = 0ULL; + notification_cfg.message_ctx = 0ULL; + + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { + if (dpaa2_eth_is_pfc_enabled(priv, i)) { + notification_cfg.threshold_entry = NAPI_POLL_WEIGHT; + notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2; + } else { + notification_cfg.threshold_entry = 0; + notification_cfg.threshold_exit = 0; + } + + err = dpni_set_congestion_notification(priv->mc_io, 0, + priv->mc_token, + DPNI_QUEUE_RX, + i, ¬ification_cfg); + if (err) { + netdev_err(net_dev, "Error %d setting congestion notif", + err); + return err; + } + } + + return 0; +} + +static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + return priv->dcbx_mode; +} + +static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + priv->dcbx_mode = mode; + return 0; +} + +static u8 
dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + switch (capid) { + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 1 << dpaa2_eth_tc_count(priv); + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->dcbx_mode; + break; + default: + *cap = false; + break; + } + + return 0; +} + +const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = { + .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc, + .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc, + .getdcbx = dpaa2_eth_dcbnl_getdcbx, + .setdcbx = dpaa2_eth_dcbnl_setdcbx, + .getcap = dpaa2_eth_dcbnl_getcap, +}; +#endif + +static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) +{ + struct device *dev; + struct net_device *net_dev = NULL; + struct dpaa2_eth_priv *priv = NULL; + int err = 0; + + dev = &dpni_dev->dev; + + /* Net device */ + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); + if (!net_dev) { + dev_err(dev, "alloc_etherdev_mq() failed\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(net_dev, dev); + dev_set_drvdata(dev, net_dev); + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + + priv->iommu_domain = iommu_get_domain_for_dev(dev); + + /* Obtain a MC portal */ + err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, + &priv->mc_io); + if (err) { + dev_dbg(dev, "MC portal allocation failed\n"); + err = -EPROBE_DEFER; + goto err_portal_alloc; + } + + /* MC objects initialization and configuration */ + err = setup_dpni(dpni_dev); + if (err) + goto err_dpni_setup; + + err = setup_dpio(priv); + if (err) { + dev_info(dev, "Defer probing as no DPIO available\n"); + err = -EPROBE_DEFER; + goto err_dpio_setup; + } + + setup_fqs(priv); + + err = setup_dpbp(priv); + if (err) + goto err_dpbp_setup; + + err = bind_dpni(priv); + if (err) + goto err_bind; + + /* Percpu statistics */ + priv->percpu_stats = alloc_percpu(*priv->percpu_stats); + if (!priv->percpu_stats) { + dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); + err = -ENOMEM; + goto err_alloc_percpu_stats; + } + priv->percpu_extras = alloc_percpu(*priv->percpu_extras); + if (!priv->percpu_extras) { + dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); + err = -ENOMEM; + goto err_alloc_percpu_extras; + } + + snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id); + if (!dev_valid_name(net_dev->name)) { + dev_warn(&net_dev->dev, + "netdevice name \"%s\" cannot be used, reverting to default..\n", + net_dev->name); + dev_alloc_name(net_dev, "eth%d"); + dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name); + } + + err = netdev_init(net_dev); + if (err) + goto err_netdev_init; + + /* Configure checksum offload based on current interface flags */ + err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); + if (err) + goto err_csum; + + err = set_tx_csum(priv, !!(net_dev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); + if (err) + goto err_csum; + + err = alloc_rings(priv); + if (err) + goto err_alloc_rings; + + net_dev->ethtool_ops = &dpaa2_ethtool_ops; +#ifdef CONFIG_FSL_DPAA2_ETH_DCB + net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops; + priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +#endif + + /* Add a NAPI context for each channel */ + add_ch_napi(priv); + enable_ch_napi(priv); + + err = setup_irqs(dpni_dev); + if (err) { + netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); + priv->poll_thread = kthread_run(poll_link_state, priv, + "%s_poll_link", net_dev->name); + if 
(IS_ERR(priv->poll_thread)) { + netdev_err(net_dev, "Error starting polling thread\n"); + goto err_poll_thread; + } + priv->do_link_poll = true; + } + + dpaa2_eth_sysfs_init(&net_dev->dev); +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS + dpaa2_dbg_add(priv); +#endif + + dev_info(dev, "Probed interface %s\n", net_dev->name); + return 0; + +err_poll_thread: + free_rings(priv); +err_alloc_rings: +err_csum: + unregister_netdev(net_dev); +err_netdev_init: + free_percpu(priv->percpu_extras); +err_alloc_percpu_extras: + free_percpu(priv->percpu_stats); +err_alloc_percpu_stats: + disable_ch_napi(priv); + del_ch_napi(priv); +err_bind: + free_dpbp(priv); +err_dpbp_setup: + free_dpio(priv); +err_dpio_setup: + free_dpni(priv); +err_dpni_setup: + fsl_mc_portal_free(priv->mc_io); +err_portal_alloc: + dev_set_drvdata(dev, NULL); + free_netdev(net_dev); + + return err; +} + +static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) +{ + struct device *dev; + struct net_device *net_dev; + struct dpaa2_eth_priv *priv; + + dev = &ls_dev->dev; + net_dev = dev_get_drvdata(dev); + priv = netdev_priv(net_dev); + +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS + dpaa2_dbg_remove(priv); +#endif + dpaa2_eth_sysfs_remove(&net_dev->dev); + + disable_ch_napi(priv); + del_ch_napi(priv); + + unregister_netdev(net_dev); + dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); + + if (priv->do_link_poll) + kthread_stop(priv->poll_thread); + else + fsl_mc_free_irqs(ls_dev); + + free_rings(priv); + free_percpu(priv->percpu_stats); + free_percpu(priv->percpu_extras); + free_dpbp(priv); + free_dpio(priv); + free_dpni(priv); + + fsl_mc_portal_free(priv->mc_io); + + dev_set_drvdata(dev, NULL); + free_netdev(net_dev); + + return 0; +} + +static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dpni", + }, + { .vendor = 0x0 } +}; +MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); + +static struct fsl_mc_driver dpaa2_eth_driver = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = dpaa2_eth_probe, + .remove = dpaa2_eth_remove, + .match_id_table = dpaa2_eth_match_id_table +}; + +static int __init dpaa2_eth_driver_init(void) +{ + int err; + + dpaa2_eth_dbg_init(); + err = fsl_mc_driver_register(&dpaa2_eth_driver); + if (err) { + dpaa2_eth_dbg_exit(); + return err; + } + + return 0; +} + +static void __exit dpaa2_eth_driver_exit(void) +{ + dpaa2_eth_dbg_exit(); + fsl_mc_driver_unregister(&dpaa2_eth_driver); +} + +module_init(dpaa2_eth_driver_init); +module_exit(dpaa2_eth_driver_exit); --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h @@ -0,0 +1,499 @@ +/* Copyright 2014-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA2_ETH_H
+#define __DPAA2_ETH_H
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/dcbnl.h>
+#include "../../fsl-mc/include/dpaa2-io.h"
+#include "dpni.h"
+#include "net.h"
+
+#include "dpaa2-eth-debugfs.h"
+
+#define DPAA2_ETH_STORE_SIZE		16
+
+/* We set a max threshold for how many Tx confirmations we should process
+ * on a NAPI poll call, since they take less processing time than Rx frames.
+ */
+#define TX_CONF_PER_NAPI_POLL		256
+
+/* Maximum number of scatter-gather entries in an ingress frame,
+ * considering the maximum receive frame size is 64K
+ */
+#define DPAA2_ETH_MAX_SG_ENTRIES	((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
+
+/* Maximum acceptable MTU value. It is in direct relation with the hardware
+ * enforced Max Frame Length (currently 10k).
+ */
+#define DPAA2_ETH_MFL			(10 * 1024)
+#define DPAA2_ETH_MAX_MTU		(DPAA2_ETH_MFL - VLAN_ETH_HLEN)
+/* Convert L3 MTU to L2 MFL */
+#define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)
+
+/* Maximum burst size value for Tx shaping */
+#define DPAA2_ETH_MAX_BURST_SIZE	0xF7FF
+
+/* Maximum number of buffers that can be acquired/released through a single
+ * QBMan command
+ */
+#define DPAA2_ETH_BUFS_PER_CMD		7
+
+/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
+ * frames in the Rx queues (length of the current frame is not
+ * taken into account when making the taildrop decision)
+ */
+#define DPAA2_ETH_TAILDROP_THRESH	(64 * 1024)
+
+/* Buffer quota per queue. Must be large enough such that for minimum sized
+ * frames taildrop kicks in before the bpool gets depleted, so we compute
+ * how many 64B frames fit inside the taildrop threshold and add a margin
+ * to accommodate the buffer refill delay.
+ */
+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_TAILDROP_THRESH / 64)
+#define DPAA2_ETH_NUM_BUFS_TD		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+#define DPAA2_ETH_REFILL_THRESH_TD \
+	(DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD)
+
+/* Buffer quota per queue to use when flow control is active. */
+#define DPAA2_ETH_NUM_BUFS_FC		256
+
+/* Hardware requires alignment for ingress/egress buffer addresses
+ * and ingress buffer lengths.
+ */
+#define DPAA2_ETH_RX_BUF_SIZE		2048
+#define DPAA2_ETH_TX_BUF_ALIGN		64
+#define DPAA2_ETH_RX_BUF_ALIGN		64
+#define DPAA2_ETH_RX_BUF_ALIGN_V1	256
+#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
+	((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - HH_DATA_MOD)
+
+/* Size of an skb built around an Rx frame buffer: the buffer itself plus
+ * enough room for struct skb_shared_info.
*/ +#define DPAA2_ETH_SKB_SIZE \ + (DPAA2_ETH_RX_BUF_SIZE + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress + * buffers large enough to allow building an skb around them and also account + * for alignment restrictions. + */ +#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \ + (DPAA2_ETH_SKB_SIZE + \ + (p_priv)->rx_buf_align) + +/* PTP nominal frequency 1GHz */ +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1 + +/* We are accommodating a skb backpointer and some S/G info + * in the frame's software annotation. The hardware + * options are either 0 or 64, so we choose the latter. + */ +#define DPAA2_ETH_SWA_SIZE 64 + +/* Extra headroom space requested to hardware, in order to make sure there's + * no realloc'ing in forwarding scenarios + */ +#define DPAA2_ETH_RX_HEAD_ROOM \ + (DPAA2_ETH_TX_HWA_SIZE - DPAA2_ETH_RX_HWA_SIZE + \ + DPAA2_ETH_TX_BUF_ALIGN) + +/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ +struct dpaa2_eth_swa { + struct sk_buff *skb; + struct scatterlist *scl; + int num_sg; + int num_dma_bufs; +}; + +/* Annotation valid bits in FD FRC */ +#define DPAA2_FD_FRC_FASV 0x8000 +#define DPAA2_FD_FRC_FAEADV 0x4000 +#define DPAA2_FD_FRC_FAPRV 0x2000 +#define DPAA2_FD_FRC_FAIADV 0x1000 +#define DPAA2_FD_FRC_FASWOV 0x0800 +#define DPAA2_FD_FRC_FAICFDV 0x0400 + +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR) +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \ + FD_CTRL_SBE | \ + FD_CTRL_FSE | \ + FD_CTRL_FAERR) + +/* Annotation bits in FD CTRL */ +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ + +/* Size of hardware annotation area based on the current buffer layout + * configuration + */ +#define DPAA2_ETH_RX_HWA_SIZE 64 +#define DPAA2_ETH_TX_HWA_SIZE 128 + +/* Frame annotation status */ +struct dpaa2_fas { + u8 reserved; + u8 ppid; + __le16 ifpid; + __le32 status; +} __packed; + +/* Frame annotation status word is located in the first 8 bytes + * of the buffer's hardware annotation area + */ +#define DPAA2_FAS_OFFSET 0 +#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas)) + +/* Timestamp is located in the next 8 bytes of the buffer's + * hardware annotation area + */ +#define DPAA2_TS_OFFSET 0x8 + +/* Frame annotation egress action descriptor */ +#define DPAA2_FAEAD_OFFSET 0x58 + +struct dpaa2_faead { + __le32 conf_fqid; + __le32 ctrl; +}; + +#define DPAA2_FAEAD_A2V 0x20000000 +#define DPAA2_FAEAD_UPDV 0x00001000 +#define DPAA2_FAEAD_UPD 0x00000010 + +/* accessors for the hardware annotation fields that we use */ +#define dpaa2_eth_get_hwa(buf_addr) \ + ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE) + +#define dpaa2_eth_get_fas(buf_addr) \ + (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET) + +#define dpaa2_eth_get_ts(buf_addr) \ + (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET) + +#define dpaa2_eth_get_faead(buf_addr) \ + (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET) + +/* Error and status bits in the frame annotation status word */ +/* Debug frame, otherwise supposed to be discarded */ +#define DPAA2_FAS_DISC 0x80000000 +/* MACSEC frame */ +#define DPAA2_FAS_MS 0x40000000 +#define DPAA2_FAS_PTP 0x08000000 +/* Ethernet multicast frame */ +#define DPAA2_FAS_MC 0x04000000 +/* Ethernet broadcast frame */ +#define DPAA2_FAS_BC 0x02000000 +#define DPAA2_FAS_KSE 0x00040000 +#define DPAA2_FAS_EOFHE 0x00020000 +#define DPAA2_FAS_MNLE 0x00010000 +#define DPAA2_FAS_TIDE 0x00008000 +#define DPAA2_FAS_PIEE 0x00004000 +/* Frame length error */ +#define 
DPAA2_FAS_FLE 0x00002000 +/* Frame physical error */ +#define DPAA2_FAS_FPE 0x00001000 +#define DPAA2_FAS_PTE 0x00000080 +#define DPAA2_FAS_ISP 0x00000040 +#define DPAA2_FAS_PHE 0x00000020 +#define DPAA2_FAS_BLE 0x00000010 +/* L3 csum validation performed */ +#define DPAA2_FAS_L3CV 0x00000008 +/* L3 csum error */ +#define DPAA2_FAS_L3CE 0x00000004 +/* L4 csum validation performed */ +#define DPAA2_FAS_L4CV 0x00000002 +/* L4 csum error */ +#define DPAA2_FAS_L4CE 0x00000001 +/* Possible errors on the ingress path */ +#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \ + (DPAA2_FAS_EOFHE) | \ + (DPAA2_FAS_MNLE) | \ + (DPAA2_FAS_TIDE) | \ + (DPAA2_FAS_PIEE) | \ + (DPAA2_FAS_FLE) | \ + (DPAA2_FAS_FPE) | \ + (DPAA2_FAS_PTE) | \ + (DPAA2_FAS_ISP) | \ + (DPAA2_FAS_PHE) | \ + (DPAA2_FAS_BLE) | \ + (DPAA2_FAS_L3CE) | \ + (DPAA2_FAS_L4CE)) +/* Tx errors */ +#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \ + (DPAA2_FAS_EOFHE) | \ + (DPAA2_FAS_MNLE) | \ + (DPAA2_FAS_TIDE)) + +/* Time in milliseconds between link state updates */ +#define DPAA2_ETH_LINK_STATE_REFRESH 1000 + +/* Number of times to retry a frame enqueue before giving up. + * Value determined empirically, in order to minimize the number + * of frames dropped on Tx + */ +#define DPAA2_ETH_ENQUEUE_RETRIES 10 + +/* Tx congestion entry & exit thresholds, in number of bytes. + * We allow a maximum of 512KB worth of frames pending processing on the Tx + * queues of an interface + */ +#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024) +#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10) + +/* Driver statistics, other than those in struct rtnl_link_stats64. + * These are usually collected per-CPU and aggregated by ethtool. + */ +struct dpaa2_eth_drv_stats { + __u64 tx_conf_frames; + __u64 tx_conf_bytes; + __u64 tx_sg_frames; + __u64 tx_sg_bytes; + __u64 rx_sg_frames; + __u64 rx_sg_bytes; + /* Enqueues retried due to portal busy */ + __u64 tx_portal_busy; +}; + +/* Per-FQ statistics */ +struct dpaa2_eth_fq_stats { + /* Number of frames received on this queue */ + __u64 frames; + /* Number of times this queue entered congestion */ + __u64 congestion_entry; +}; + +/* Per-channel statistics */ +struct dpaa2_eth_ch_stats { + /* Volatile dequeues retried due to portal busy */ + __u64 dequeue_portal_busy; + /* Number of CDANs; useful to estimate avg NAPI len */ + __u64 cdan; + /* Number of frames received on queues from this channel */ + __u64 frames; + /* Pull errors */ + __u64 pull_err; +}; + +#define DPAA2_ETH_MAX_DPCONS NR_CPUS +#define DPAA2_ETH_MAX_TCS 8 + +/* Maximum number of queues associated with a DPNI */ +#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS) +#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS +#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 +#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ + DPAA2_ETH_MAX_TX_QUEUES + \ + DPAA2_ETH_MAX_RX_ERR_QUEUES) + +enum dpaa2_eth_fq_type { + DPAA2_RX_FQ = 0, + DPAA2_TX_CONF_FQ, + DPAA2_RX_ERR_FQ +}; + +struct dpaa2_eth_priv; + +struct dpaa2_eth_fq { + u32 fqid; + u32 tx_qdbin; + u16 flowid; + u8 tc; + int target_cpu; + struct dpaa2_eth_channel *channel; + enum dpaa2_eth_fq_type type; + + void (*consume)(struct dpaa2_eth_priv *, + struct dpaa2_eth_channel *, + const struct dpaa2_fd *, + struct napi_struct *, + u16 queue_id); + struct dpaa2_eth_fq_stats stats; +}; + +struct dpaa2_eth_channel { + struct dpaa2_io_notification_ctx nctx; + struct fsl_mc_device *dpcon; + int dpcon_id; + int ch_id; + int dpio_id; + struct napi_struct napi; + struct 
dpaa2_io_store *store; + struct dpaa2_eth_priv *priv; + int buf_count; + struct dpaa2_eth_ch_stats stats; +}; + +struct dpaa2_eth_cls_rule { + struct ethtool_rx_flow_spec fs; + bool in_use; +}; + +struct dpaa2_eth_hash_fields { + u64 rxnfc_field; + enum net_prot cls_prot; + int cls_field; + int offset; + int size; +}; + +/* Driver private data */ +struct dpaa2_eth_priv { + struct net_device *net_dev; + + /* Standard statistics */ + struct rtnl_link_stats64 __percpu *percpu_stats; + /* Extra stats, in addition to the ones known by the kernel */ + struct dpaa2_eth_drv_stats __percpu *percpu_extras; + struct iommu_domain *iommu_domain; + + bool ts_tx_en; /* Tx timestamping enabled */ + bool ts_rx_en; /* Rx timestamping enabled */ + + u16 tx_data_offset; + u16 rx_buf_align; + + u16 bpid; + u16 tx_qdid; + + int tx_pause_frames; + int num_bufs; + int refill_thresh; + + /* Tx congestion notifications are written here */ + void *cscn_mem; + void *cscn_unaligned; + dma_addr_t cscn_dma; + + u8 num_fqs; + /* Tx queues are at the beginning of the array */ + struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; + + u8 num_channels; + struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; + + int dpni_id; + struct dpni_attr dpni_attrs; + struct fsl_mc_device *dpbp_dev; + + struct fsl_mc_io *mc_io; + /* SysFS-controlled affinity mask for TxConf FQs */ + struct cpumask txconf_cpumask; + /* Cores which have an affine DPIO/DPCON. + * This is the cpu set on which Rx frames are processed; + * Tx confirmation frames are processed on a subset of this, + * depending on user settings. + */ + struct cpumask dpio_cpumask; + + u16 mc_token; + + struct dpni_link_state link_state; + bool do_link_poll; + struct task_struct *poll_thread; + + struct dpaa2_eth_hash_fields *hash_fields; + u8 num_hash_fields; + /* enabled ethtool hashing bits */ + u64 rx_flow_hash; + +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS + struct dpaa2_debugfs dbg; +#endif + + /* array of classification rules */ + struct dpaa2_eth_cls_rule *cls_rule; + + struct dpni_tx_shaping_cfg shaping_cfg; + + u8 dcbx_mode; + struct ieee_pfc pfc; + bool vlan_clsf_set; +}; + +#define dpaa2_eth_hash_enabled(priv) \ + ((priv)->dpni_attrs.num_queues > 1) + +#define dpaa2_eth_fs_enabled(priv) \ + (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS)) + +#define dpaa2_eth_fs_mask_enabled(priv) \ + ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING) + +#define dpaa2_eth_fs_count(priv) \ + ((priv)->dpni_attrs.fs_entries) + +/* size of DMA memory used to pass configuration to classifier, in bytes */ +#define DPAA2_CLASSIFIER_DMA_SIZE 256 + +extern const struct ethtool_ops dpaa2_ethtool_ops; +extern const char dpaa2_eth_drv_version[]; + +static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) +{ + return priv->dpni_attrs.num_queues; +} + +static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv) +{ + return priv->dpni_attrs.num_tcs; +} + +static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv, + int traffic_class) +{ + return priv->pfc.pfc_en & (1 << traffic_class); +} + +enum dpaa2_eth_td_cfg { + DPAA2_ETH_TD_NONE, + DPAA2_ETH_TD_QUEUE, + DPAA2_ETH_TD_GROUP +}; + +static inline enum dpaa2_eth_td_cfg +dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv) +{ + bool pfc_enabled = !!(priv->pfc.pfc_en); + + if (pfc_enabled) + return DPAA2_ETH_TD_GROUP; + else if (priv->tx_pause_frames) + return DPAA2_ETH_TD_NONE; + else + return DPAA2_ETH_TD_QUEUE; +} + +void check_cls_support(struct dpaa2_eth_priv *priv); + +int set_rx_taildrop(struct dpaa2_eth_priv *priv); 
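+
+/* Quick reference for the Rx taildrop policy encoded in
+ * dpaa2_eth_get_td_type() above: PFC enabled on any traffic class selects
+ * per-group taildrop; active Tx pause frames disable taildrop entirely,
+ * since link-level flow control already provides backpressure; otherwise
+ * each Rx queue gets its own taildrop threshold.
+ */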
+#endif	/* __DPAA2_ETH_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -0,0 +1,864 @@
+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include "dpni.h" /* DPNI_LINK_OPT_* */ +#include "dpaa2-eth.h" + +/* To be kept in sync with dpni_statistics */ +static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { + "rx frames", + "rx bytes", + "rx mcast frames", + "rx mcast bytes", + "rx bcast frames", + "rx bcast bytes", + "tx frames", + "tx bytes", + "tx mcast frames", + "tx mcast bytes", + "tx bcast frames", + "tx bcast bytes", + "rx filtered frames", + "rx discarded frames", + "rx nobuffer discards", + "tx discarded frames", + "tx confirmed frames", +}; + +#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) + +/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */ +static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { + /* per-cpu stats */ + + "tx conf frames", + "tx conf bytes", + "tx sg frames", + "tx sg bytes", + "rx sg frames", + "rx sg bytes", + /* how many times we had to retry the enqueue command */ + "enqueue portal busy", + + /* Channel stats */ + /* How many times we had to retry the volatile dequeue command */ + "dequeue portal busy", + "channel pull errors", + /* Number of notifications received */ + "cdan", + "tx congestion state", +#ifdef CONFIG_FSL_QBMAN_DEBUG + /* FQ stats */ + "rx pending frames", + "rx pending bytes", + "tx conf pending frames", + "tx conf pending bytes", + "buffer count" +#endif +}; + +#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) + +static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, dpaa2_eth_drv_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), + sizeof(drvinfo->bus_info)); +} + +static int dpaa2_eth_get_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct dpni_link_state state = {0}; + int err = 0; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (err) { + netdev_err(net_dev, "ERROR %d getting link state", err); + goto out; + } + + /* At the moment, we have no way of interrogating the DPMAC + * from the DPNI side - and for that matter there may exist + * no DPMAC at all. So for now we just don't report anything + * beyond the DPNI attributes. 
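+	 * The speed and duplex reported below therefore come straight from
+	 * the DPNI link state, not from the physical port.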
+ */ + if (state.options & DPNI_LINK_OPT_AUTONEG) + cmd->autoneg = AUTONEG_ENABLE; + if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) + cmd->duplex = DUPLEX_FULL; + ethtool_cmd_speed_set(cmd, state.rate); + +out: + return err; +} + +static int dpaa2_eth_set_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpni_link_state state = {0}; + struct dpni_link_cfg cfg = {0}; + int err = 0; + + netdev_dbg(net_dev, "Setting link parameters..."); + + /* Need to interrogate on link state to get flow control params */ + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (err) { + netdev_err(net_dev, "ERROR %d getting link state", err); + goto out; + } + + cfg.options = state.options; + cfg.rate = ethtool_cmd_speed(cmd); + if (cmd->autoneg == AUTONEG_ENABLE) + cfg.options |= DPNI_LINK_OPT_AUTONEG; + else + cfg.options &= ~DPNI_LINK_OPT_AUTONEG; + if (cmd->duplex == DUPLEX_HALF) + cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; + else + cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; + + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); + if (err) + /* ethtool will be loud enough if we return an error; no point + * in putting our own error message on the console by default + */ + netdev_dbg(net_dev, "ERROR %d setting link cfg", err); + +out: + return err; +} + +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpni_link_state state = {0}; + int err; + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (err) + netdev_dbg(net_dev, "ERROR %d getting link state", err); + + /* for now, pause frames autonegotiation is not separate */ + pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG); + pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE); + pause->tx_pause = pause->rx_pause ^ + !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE); +} + +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpni_link_state state = {0}; + struct dpni_link_cfg cfg = {0}; + u32 current_tx_pause; + int err = 0; + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (err) { + netdev_dbg(net_dev, "ERROR %d getting link state", err); + goto out; + } + + cfg.rate = state.rate; + cfg.options = state.options; + current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^ + !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE); + + if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG)) + netdev_warn(net_dev, + "WARN: Can't change pause frames autoneg separately\n"); + + if (pause->rx_pause) + cfg.options |= DPNI_LINK_OPT_PAUSE; + else + cfg.options &= ~DPNI_LINK_OPT_PAUSE; + + if (pause->rx_pause ^ pause->tx_pause) + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; + else + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; + + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); + if (err) { + /* ethtool will be loud enough if we return an error; no point + * in putting our own error message on the console by default + */ + netdev_dbg(net_dev, "ERROR %d setting link cfg", err); + goto out; + } + + /* Enable / disable taildrops if Tx pause frames have changed */ + if (current_tx_pause == pause->tx_pause) + goto out; + + priv->tx_pause_frames = pause->tx_pause; + err = set_rx_taildrop(priv); + if (err) + netdev_dbg(net_dev, "ERROR %d configuring taildrop", err); + + 
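+	/* Note: tx_pause_frames was updated before the set_rx_taildrop()
+	 * call above, since the taildrop choice depends on it (see
+	 * dpaa2_eth_get_td_type()).
+	 */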
+out:
+	return err;
+}
+
+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
+				  u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
+			strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
+			strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
+		return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* Fill in hardware counters, as returned by MC. */
+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+					struct ethtool_stats *stats,
+					u64 *data)
+{
+	int i = 0; /* Current index in the data array */
+	int j = 0, k, err;
+	union dpni_statistics dpni_stats;
+
+#ifdef CONFIG_FSL_QBMAN_DEBUG
+	u32 fcnt, bcnt;
+	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+	u32 buf_cnt;
+#endif
+	u64 cdan = 0;
+	u64 portal_busy = 0, pull_err = 0;
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	struct dpaa2_eth_drv_stats *extras;
+	struct dpaa2_eth_ch_stats *ch_stats;
+
+	memset(data, 0,
+	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+
+	/* Fill in standard counters, from DPNI statistics */
+	for (j = 0; j <= 2; j++) {
+		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
+					  j, &dpni_stats);
+		if (err != 0)
+			netdev_warn(net_dev, "Err %d getting DPNI stats page %d",
+				    err, j);
+
+		switch (j) {
+		case 0:
+			*(data + i++) = dpni_stats.page_0.ingress_all_frames;
+			*(data + i++) = dpni_stats.page_0.ingress_all_bytes;
+			*(data + i++) = dpni_stats.page_0.ingress_multicast_frames;
+			*(data + i++) = dpni_stats.page_0.ingress_multicast_bytes;
+			*(data + i++) = dpni_stats.page_0.ingress_broadcast_frames;
+			*(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes;
+			break;
+		case 1:
+			*(data + i++) = dpni_stats.page_1.egress_all_frames;
+			*(data + i++) = dpni_stats.page_1.egress_all_bytes;
+			*(data + i++) = dpni_stats.page_1.egress_multicast_frames;
+			*(data + i++) = dpni_stats.page_1.egress_multicast_bytes;
+			*(data + i++) = dpni_stats.page_1.egress_broadcast_frames;
+			*(data + i++) = dpni_stats.page_1.egress_broadcast_bytes;
+			break;
+		case 2:
+			*(data + i++) = dpni_stats.page_2.ingress_filtered_frames;
+			*(data + i++) = dpni_stats.page_2.ingress_discarded_frames;
+			*(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards;
+			*(data + i++) = dpni_stats.page_2.egress_discarded_frames;
+			*(data + i++) = dpni_stats.page_2.egress_confirmed_frames;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Fill in per-cpu extra stats */
+	for_each_online_cpu(k) {
+		extras = per_cpu_ptr(priv->percpu_extras, k);
+		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
+			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
+	}
+
+	i += j;
+
+	/* We may be using fewer DPIOs than actual CPUs */
+	for (j = 0; j <
priv->num_fqs; j++) { + /* Print FQ instantaneous counts */ + err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, + &fcnt, &bcnt); + if (err) { + netdev_warn(net_dev, "FQ query error %d", err); + return; + } + + if (priv->fq[j].type == DPAA2_TX_CONF_FQ) { + fcnt_tx_total += fcnt; + bcnt_tx_total += bcnt; + } else { + fcnt_rx_total += fcnt; + bcnt_rx_total += bcnt; + } + } + + *(data + i++) = fcnt_rx_total; + *(data + i++) = bcnt_rx_total; + *(data + i++) = fcnt_tx_total; + *(data + i++) = bcnt_tx_total; + + err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt); + if (err) { + netdev_warn(net_dev, "Buffer count query error %d\n", err); + return; + } + *(data + i++) = buf_cnt; +#endif +} + +static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field) +{ + int i, off = 0; + + for (i = 0; i < priv->num_hash_fields; i++) { + if (priv->hash_fields[i].cls_prot == prot && + priv->hash_fields[i].cls_field == field) + return off; + off += priv->hash_fields[i].size; + } + + return -1; +} + +static u8 cls_key_size(struct dpaa2_eth_priv *priv) +{ + u8 i, size = 0; + + for (i = 0; i < priv->num_hash_fields; i++) + size += priv->hash_fields[i].size; + + return size; +} + +void check_cls_support(struct dpaa2_eth_priv *priv) +{ + u8 key_size = cls_key_size(priv); + struct device *dev = priv->net_dev->dev.parent; + + if (dpaa2_eth_hash_enabled(priv)) { + if (priv->dpni_attrs.fs_key_size < key_size) { + dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n", + priv->dpni_attrs.fs_key_size, + key_size); + goto disable_fs; + } + if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) { + dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n", + DPKG_MAX_NUM_OF_EXTRACTS); + goto disable_fs; + } + } + + if (dpaa2_eth_fs_enabled(priv)) { + if (!dpaa2_eth_hash_enabled(priv)) { + dev_info(dev, "Insufficient queues. Steering is disabled\n"); + goto disable_fs; + } + + if (!dpaa2_eth_fs_mask_enabled(priv)) { + dev_info(dev, "Key masks not supported. 
Steering is disabled\n"); + goto disable_fs; + } + } + + return; + +disable_fs: + priv->dpni_attrs.options |= DPNI_OPT_NO_FS; + priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING; +} + +static int prep_l4_rule(struct dpaa2_eth_priv *priv, + struct ethtool_tcpip4_spec *l4_value, + struct ethtool_tcpip4_spec *l4_mask, + void *key, void *mask, u8 l4_proto) +{ + int offset; + + if (l4_mask->tos) { + netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n"); + return -EOPNOTSUPP; + } + + if (l4_mask->ip4src) { + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); + *(u32 *)(key + offset) = l4_value->ip4src; + *(u32 *)(mask + offset) = l4_mask->ip4src; + } + + if (l4_mask->ip4dst) { + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); + *(u32 *)(key + offset) = l4_value->ip4dst; + *(u32 *)(mask + offset) = l4_mask->ip4dst; + } + + if (l4_mask->psrc) { + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); + *(u32 *)(key + offset) = l4_value->psrc; + *(u32 *)(mask + offset) = l4_mask->psrc; + } + + if (l4_mask->pdst) { + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); + *(u32 *)(key + offset) = l4_value->pdst; + *(u32 *)(mask + offset) = l4_mask->pdst; + } + + /* Only apply the rule for the user-specified L4 protocol + * and if ethertype matches IPv4 + */ + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); + *(u16 *)(key + offset) = htons(ETH_P_IP); + *(u16 *)(mask + offset) = 0xFFFF; + + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); + *(u8 *)(key + offset) = l4_proto; + *(u8 *)(mask + offset) = 0xFF; + + /* TODO: check IP version */ + + return 0; +} + +static int prep_eth_rule(struct dpaa2_eth_priv *priv, + struct ethhdr *eth_value, struct ethhdr *eth_mask, + void *key, void *mask) +{ + int offset; + + if (eth_mask->h_proto) { + netdev_err(priv->net_dev, "Ethertype is not supported!\n"); + return -EOPNOTSUPP; + } + + if (!is_zero_ether_addr(eth_mask->h_source)) { + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA); + ether_addr_copy(key + offset, eth_value->h_source); + ether_addr_copy(mask + offset, eth_mask->h_source); + } + + if (!is_zero_ether_addr(eth_mask->h_dest)) { + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); + ether_addr_copy(key + offset, eth_value->h_dest); + ether_addr_copy(mask + offset, eth_mask->h_dest); + } + + return 0; +} + +static int prep_user_ip_rule(struct dpaa2_eth_priv *priv, + struct ethtool_usrip4_spec *uip_value, + struct ethtool_usrip4_spec *uip_mask, + void *key, void *mask) +{ + int offset; + + if (uip_mask->tos) + return -EOPNOTSUPP; + + if (uip_mask->ip4src) { + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); + *(u32 *)(key + offset) = uip_value->ip4src; + *(u32 *)(mask + offset) = uip_mask->ip4src; + } + + if (uip_mask->ip4dst) { + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); + *(u32 *)(key + offset) = uip_value->ip4dst; + *(u32 *)(mask + offset) = uip_mask->ip4dst; + } + + if (uip_mask->proto) { + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); + *(u32 *)(key + offset) = uip_value->proto; + *(u32 *)(mask + offset) = uip_mask->proto; + } + if (uip_mask->l4_4_bytes) { + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); + *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16; + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16; + + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); + *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF; + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF; + } + + /* Ethertype must be 
IP */ + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); + *(u16 *)(key + offset) = htons(ETH_P_IP); + *(u16 *)(mask + offset) = 0xFFFF; + + return 0; +} + +static int prep_ext_rule(struct dpaa2_eth_priv *priv, + struct ethtool_flow_ext *ext_value, + struct ethtool_flow_ext *ext_mask, + void *key, void *mask) +{ + int offset; + + if (ext_mask->vlan_etype) + return -EOPNOTSUPP; + + if (ext_mask->vlan_tci) { + offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI); + *(u16 *)(key + offset) = ext_value->vlan_tci; + *(u16 *)(mask + offset) = ext_mask->vlan_tci; + } + + return 0; +} + +static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv, + struct ethtool_flow_ext *ext_value, + struct ethtool_flow_ext *ext_mask, + void *key, void *mask) +{ + int offset; + + if (!is_zero_ether_addr(ext_mask->h_dest)) { + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); + ether_addr_copy(key + offset, ext_value->h_dest); + ether_addr_copy(mask + offset, ext_mask->h_dest); + } + + return 0; +} + +static int prep_cls_rule(struct net_device *net_dev, + struct ethtool_rx_flow_spec *fs, + void *key) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + const u8 key_size = cls_key_size(priv); + void *msk = key + key_size; + int err; + + memset(key, 0, key_size * 2); + + switch (fs->flow_type & 0xff) { + case TCP_V4_FLOW: + err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec, + &fs->m_u.tcp_ip4_spec, key, msk, + IPPROTO_TCP); + break; + case UDP_V4_FLOW: + err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec, + &fs->m_u.udp_ip4_spec, key, msk, + IPPROTO_UDP); + break; + case SCTP_V4_FLOW: + err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec, + &fs->m_u.sctp_ip4_spec, key, msk, + IPPROTO_SCTP); + break; + case ETHER_FLOW: + err = prep_eth_rule(priv, &fs->h_u.ether_spec, + &fs->m_u.ether_spec, key, msk); + break; + case IP_USER_FLOW: + err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec, + &fs->m_u.usr_ip4_spec, key, msk); + break; + default: + /* TODO: AH, ESP */ + return -EOPNOTSUPP; + } + if (err) + return err; + + if (fs->flow_type & FLOW_EXT) { + err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); + if (err) + return err; + } + + if (fs->flow_type & FLOW_MAC_EXT) { + err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); + if (err) + return err; + } + + return 0; +} + +static int del_cls(struct net_device *net_dev, int location); + +static int do_cls(struct net_device *net_dev, + struct ethtool_rx_flow_spec *fs, + bool add) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + const int rule_cnt = dpaa2_eth_fs_count(priv); + struct dpni_rule_cfg rule_cfg; + struct dpni_fs_action_cfg fs_act = { 0 }; + void *dma_mem; + int err = 0, tc; + + if (!dpaa2_eth_fs_enabled(priv)) { + netdev_err(net_dev, "dev does not support steering!\n"); + /* dev doesn't support steering */ + return -EOPNOTSUPP; + } + + if ((fs->ring_cookie != RX_CLS_FLOW_DISC && + fs->ring_cookie >= dpaa2_eth_queue_count(priv)) || + fs->location >= rule_cnt) + return -EINVAL; + + /* When adding a new rule, check if location if available, + * and if not free the existing table entry before inserting + * the new one + */ + if (add && (priv->cls_rule[fs->location].in_use == true)) + del_cls(net_dev, fs->location); + + memset(&rule_cfg, 0, sizeof(rule_cfg)); + rule_cfg.key_size = cls_key_size(priv); + + /* allocate twice the key size, for the actual key and for mask */ + dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); + if (!dma_mem) + return -ENOMEM; + + err = 
prep_cls_rule(net_dev, fs, dma_mem); + if (err) + goto err_free_mem; + + rule_cfg.key_iova = dma_map_single(dev, dma_mem, + rule_cfg.key_size * 2, + DMA_TO_DEVICE); + + rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; + + if (fs->ring_cookie == RX_CLS_FLOW_DISC) + fs_act.options |= DPNI_FS_OPT_DISCARD; + else + fs_act.flow_id = fs->ring_cookie; + + for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) { + if (add) + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, + tc, fs->location, &rule_cfg, + &fs_act); + else + err = dpni_remove_fs_entry(priv->mc_io, 0, + priv->mc_token, tc, + &rule_cfg); + + if (err) + break; + } + + dma_unmap_single(dev, rule_cfg.key_iova, + rule_cfg.key_size * 2, DMA_TO_DEVICE); + + if (err) + netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err); + +err_free_mem: + kfree(dma_mem); + + return err; +} + +static int add_cls(struct net_device *net_dev, + struct ethtool_rx_flow_spec *fs) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err; + + err = do_cls(net_dev, fs, true); + if (err) + return err; + + priv->cls_rule[fs->location].in_use = true; + priv->cls_rule[fs->location].fs = *fs; + + return 0; +} + +static int del_cls(struct net_device *net_dev, int location) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err; + + err = do_cls(net_dev, &priv->cls_rule[location].fs, false); + if (err) + return err; + + priv->cls_rule[location].in_use = false; + + return 0; +} + +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *rxnfc) +{ + int err = 0; + + switch (rxnfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = add_cls(net_dev, &rxnfc->fs); + break; + + case ETHTOOL_SRXCLSRLDEL: + err = del_cls(net_dev, rxnfc->fs.location); + break; + + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + const int rule_cnt = dpaa2_eth_fs_count(priv); + int i, j; + + switch (rxnfc->cmd) { + case ETHTOOL_GRXFH: + /* we purposely ignore cmd->flow_type, because the hashing key + * is the same (and fixed) for all protocols + */ + rxnfc->data = priv->rx_flow_hash; + break; + + case ETHTOOL_GRXRINGS: + rxnfc->data = dpaa2_eth_queue_count(priv); + break; + + case ETHTOOL_GRXCLSRLCNT: + for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++) + if (priv->cls_rule[i].in_use) + rxnfc->rule_cnt++; + rxnfc->data = rule_cnt; + break; + + case ETHTOOL_GRXCLSRULE: + if (!priv->cls_rule[rxnfc->fs.location].in_use) + return -EINVAL; + + rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs; + break; + + case ETHTOOL_GRXCLSRLALL: + for (i = 0, j = 0; i < rule_cnt; i++) { + if (!priv->cls_rule[i].in_use) + continue; + if (j == rxnfc->rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = i; + } + rxnfc->rule_cnt = j; + rxnfc->data = rule_cnt; + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +const struct ethtool_ops dpaa2_ethtool_ops = { + .get_drvinfo = dpaa2_eth_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_settings = dpaa2_eth_get_settings, + .set_settings = dpaa2_eth_set_settings, + .get_pauseparam = dpaa2_eth_get_pauseparam, + .set_pauseparam = dpaa2_eth_set_pauseparam, + .get_sset_count = dpaa2_eth_get_sset_count, + .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, + .get_strings = dpaa2_eth_get_strings, + .get_rxnfc = dpaa2_eth_get_rxnfc, + .set_rxnfc = dpaa2_eth_set_rxnfc, +}; --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h @@ 
-0,0 +1,176 @@ +/* Copyright 2013-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __FSL_DPKG_H_ +#define __FSL_DPKG_H_ + +#include +#include "net.h" + +/* Data Path Key Generator API + * Contains initialization APIs and runtime APIs for the Key Generator + */ + +/** Key Generator properties */ + +/** + * Number of masks per key extraction + */ +#define DPKG_NUM_OF_MASKS 4 +/** + * Number of extractions per key profile + */ +#define DPKG_MAX_NUM_OF_EXTRACTS 10 + +/** + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field + * @DPKG_FULL_FIELD: Extract a full field + */ +enum dpkg_extract_from_hdr_type { + DPKG_FROM_HDR = 0, + DPKG_FROM_FIELD = 1, + DPKG_FULL_FIELD = 2 +}; + +/** + * enum dpkg_extract_type - Enumeration for selecting extraction type + * @DPKG_EXTRACT_FROM_HDR: Extract from the header + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; + * e.g. 
can be used to extract header existence; + * please refer to 'Parse Result definition' section in the parser BG + */ +enum dpkg_extract_type { + DPKG_EXTRACT_FROM_HDR = 0, + DPKG_EXTRACT_FROM_DATA = 1, + DPKG_EXTRACT_FROM_PARSE = 3 +}; + +/** + * struct dpkg_mask - A structure for defining a single extraction mask + * @mask: Byte mask for the extracted content + * @offset: Offset within the extracted content + */ +struct dpkg_mask { + u8 mask; + u8 offset; +}; + +/** + * struct dpkg_extract - A structure for defining a single extraction + * @type: Determines how the union below is interpreted: + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; + * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' + * @extract: Selects extraction method + * @num_of_byte_masks: Defines the number of valid entries in the array below; + * This is also the number of bytes to be used as masks + * @masks: Masks parameters + */ +struct dpkg_extract { + enum dpkg_extract_type type; + /** + * union extract - Selects extraction method + * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' + * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' + * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' + */ + union { + /** + * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' + * @prot: Any of the supported headers + * @type: Defines the type of header extraction: + * DPKG_FROM_HDR: use size & offset below; + * DPKG_FROM_FIELD: use field, size and offset below; + * DPKG_FULL_FIELD: use field below + * @field: One of the supported fields (NH_FLD_) + * + * @size: Size in bytes + * @offset: Byte offset + * @hdr_index: Clear for cases not listed below; + * Used for protocols that may have more than a single + * header, 0 indicates an outer header; + * Supported protocols (possible values): + * NET_PROT_VLAN (0, HDR_INDEX_LAST); + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); + * NET_PROT_IP(0, HDR_INDEX_LAST); + * NET_PROT_IPv4(0, HDR_INDEX_LAST); + * NET_PROT_IPv6(0, HDR_INDEX_LAST); + */ + + struct { + enum net_prot prot; + enum dpkg_extract_from_hdr_type type; + u32 field; + u8 size; + u8 offset; + u8 hdr_index; + } from_hdr; + /** + * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' + * @size: Size in bytes + * @offset: Byte offset + */ + struct { + u8 size; + u8 offset; + } from_data; + + /** + * struct from_parse - Used when + * 'type = DPKG_EXTRACT_FROM_PARSE' + * @size: Size in bytes + * @offset: Byte offset + */ + struct { + u8 size; + u8 offset; + } from_parse; + } extract; + + u8 num_of_byte_masks; + struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; +}; + +/** + * struct dpkg_profile_cfg - A structure for defining a full Key Generation + * profile (rule) + * @num_extracts: Defines the number of valid entries in the array below + * @extracts: Array of required extractions + */ +struct dpkg_profile_cfg { + u8 num_extracts; + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; +}; + +#endif /* __FSL_DPKG_H_ */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h @@ -0,0 +1,658 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _FSL_DPNI_CMD_H +#define _FSL_DPNI_CMD_H + +/* DPNI Version */ +#define DPNI_VER_MAJOR 7 +#define DPNI_VER_MINOR 0 +#define DPNI_CMD_BASE_VERSION 1 +#define DPNI_CMD_2ND_VERSION 2 +#define DPNI_CMD_ID_OFFSET 4 + +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) +#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION) + +#define DPNI_CMDID_OPEN DPNI_CMD(0x801) +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800) +#define DPNI_CMDID_CREATE DPNI_CMD(0x901) +#define DPNI_CMDID_DESTROY DPNI_CMD(0x900) +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01) + +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002) +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003) +#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004) +#define DPNI_CMDID_RESET DPNI_CMD(0x005) +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006) + +#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010) +#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011) +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012) +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013) +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014) +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015) +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) + +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200) +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) + +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212) +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215) +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216) +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217) +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A) +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B) + +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220) +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221) +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222) +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223) +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224) +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225) +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226) 
+#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227) +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228) + +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235) + +#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240) +#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241) +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244) +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245) +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246) + +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D) +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E) +#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F) +#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260) +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261) +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262) + +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263) + +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264) +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265) + +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266) +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267) +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268) +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269) +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A) +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B) +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPNI_MASK(field) \ + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \ + DPNI_##field##_SHIFT) + +#define dpni_set_field(var, field, val) \ + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field))) +#define dpni_get_field(var, field) \ + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT) + +struct dpni_cmd_open { + __le32 dpni_id; +}; + +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) +struct dpni_cmd_set_pools { + u8 num_dpbp; + u8 backup_pool_mask; + __le16 pad; + struct { + __le16 dpbp_id; + u8 priority_mask; + u8 pad; + } pool[DPNI_MAX_DPBP]; + __le16 buffer_size[DPNI_MAX_DPBP]; +}; + +/* The enable indication is always the least significant bit */ +#define DPNI_ENABLE_SHIFT 0 +#define DPNI_ENABLE_SIZE 1 + +struct dpni_rsp_is_enabled { + u8 enabled; +}; + +struct dpni_rsp_get_irq { + /* response word 0 */ + __le32 irq_val; + __le32 pad; + /* response word 1 */ + __le64 irq_addr; + /* response word 2 */ + __le32 irq_num; + __le32 type; +}; + +struct dpni_cmd_set_irq_enable { + u8 enable; + u8 pad[3]; + u8 irq_index; +}; + +struct dpni_cmd_get_irq_enable { + __le32 pad; + u8 irq_index; +}; + +struct dpni_rsp_get_irq_enable { + u8 enabled; +}; + +struct dpni_cmd_set_irq_mask { + __le32 mask; + u8 irq_index; +}; + +struct dpni_cmd_get_irq_mask { + __le32 pad; + u8 irq_index; +}; + +struct dpni_rsp_get_irq_mask { + __le32 mask; +}; + +struct dpni_cmd_get_irq_status { + __le32 status; + u8 irq_index; +}; + +struct dpni_rsp_get_irq_status { + __le32 status; +}; + +struct dpni_cmd_clear_irq_status { + __le32 status; + u8 irq_index; +}; + +struct dpni_rsp_get_attr { + /* response word 0 */ + __le32 options; + u8 num_queues; + u8 num_tcs; + u8 mac_filter_entries; + u8 pad0; + /* response word 1 */ + u8 vlan_filter_entries; + u8 pad1; + u8 qos_entries; + u8 pad2; + __le16 fs_entries; + __le16 pad3; + /* response word 2 */ + u8 qos_key_size; + u8 fs_key_size; + __le16 wriop_version; +}; + +#define DPNI_ERROR_ACTION_SHIFT 0 +#define DPNI_ERROR_ACTION_SIZE 4 +#define DPNI_FRAME_ANN_SHIFT 4 +#define DPNI_FRAME_ANN_SIZE 1 + +struct dpni_cmd_set_errors_behavior { + __le32 errors; + /* from least significant bit: error_action:4, set_frame_annotation:1 */ + u8 flags; +}; + 
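+/*
+ * Illustrative sketch (not part of the MC interface): the DPNI_MASK(),
+ * dpni_set_field() and dpni_get_field() macros above pack and unpack
+ * sub-byte command fields.  For instance, with the ERROR_ACTION (bits 0-3)
+ * and FRAME_ANN (bit 4) fields of dpni_cmd_set_errors_behavior:
+ *
+ *	struct dpni_cmd_set_errors_behavior c = { 0 };
+ *
+ *	dpni_set_field(c.flags, ERROR_ACTION, 2);
+ *	dpni_set_field(c.flags, FRAME_ANN, 1);
+ *	(c.flags is now 0x12; dpni_get_field(c.flags, ERROR_ACTION) reads 2)
+ */
+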
+/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation + * buffer layouts, but they all share the same parameters. + * If one of the functions changes, below structure needs to be split. + */ + +#define DPNI_PASS_TS_SHIFT 0 +#define DPNI_PASS_TS_SIZE 1 +#define DPNI_PASS_PR_SHIFT 1 +#define DPNI_PASS_PR_SIZE 1 +#define DPNI_PASS_FS_SHIFT 2 +#define DPNI_PASS_FS_SIZE 1 + +struct dpni_cmd_get_buffer_layout { + u8 qtype; +}; + +struct dpni_rsp_get_buffer_layout { + /* response word 0 */ + u8 pad0[6]; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + u8 flags; + u8 pad1; + /* response word 1 */ + __le16 private_data_size; + __le16 data_align; + __le16 head_room; + __le16 tail_room; +}; + +struct dpni_cmd_set_buffer_layout { + /* cmd word 0 */ + u8 qtype; + u8 pad0[3]; + __le16 options; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + u8 flags; + u8 pad1; + /* cmd word 1 */ + __le16 private_data_size; + __le16 data_align; + __le16 head_room; + __le16 tail_room; +}; + +struct dpni_cmd_set_offload { + u8 pad[3]; + u8 dpni_offload; + __le32 config; +}; + +struct dpni_cmd_get_offload { + u8 pad[3]; + u8 dpni_offload; +}; + +struct dpni_rsp_get_offload { + __le32 pad; + __le32 config; +}; + +struct dpni_cmd_get_qdid { + u8 qtype; +}; + +struct dpni_rsp_get_qdid { + __le16 qdid; +}; + +struct dpni_rsp_get_tx_data_offset { + __le16 data_offset; +}; + +struct dpni_cmd_get_statistics { + u8 page_number; +}; + +struct dpni_rsp_get_statistics { + __le64 counter[DPNI_STATISTICS_CNT]; +}; + +struct dpni_cmd_set_link_cfg { + /* cmd word 0 */ + __le64 pad0; + /* cmd word 1 */ + __le32 rate; + __le32 pad1; + /* cmd word 2 */ + __le64 options; +}; + +#define DPNI_LINK_STATE_SHIFT 0 +#define DPNI_LINK_STATE_SIZE 1 + +struct dpni_rsp_get_link_state { + /* response word 0 */ + __le32 pad0; + /* from LSB: up:1 */ + u8 flags; + u8 pad1[3]; + /* response word 1 */ + __le32 rate; + __le32 pad2; + /* response word 2 */ + __le64 options; +}; + +struct dpni_cmd_set_tx_shaping { + /* cmd word 0 */ + __le16 max_burst_size; + __le16 pad0[3]; + /* cmd word 1 */ + __le32 rate_limit; +}; + +struct dpni_cmd_set_max_frame_length { + __le16 max_frame_length; +}; + +struct dpni_rsp_get_max_frame_length { + __le16 max_frame_length; +}; + +struct dpni_cmd_set_multicast_promisc { + u8 enable; +}; + +struct dpni_rsp_get_multicast_promisc { + u8 enabled; +}; + +struct dpni_cmd_set_unicast_promisc { + u8 enable; +}; + +struct dpni_rsp_get_unicast_promisc { + u8 enabled; +}; + +struct dpni_cmd_set_primary_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_rsp_get_primary_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_rsp_get_port_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_cmd_add_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_cmd_remove_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +#define DPNI_UNICAST_FILTERS_SHIFT 0 +#define DPNI_UNICAST_FILTERS_SIZE 1 +#define DPNI_MULTICAST_FILTERS_SHIFT 1 +#define DPNI_MULTICAST_FILTERS_SIZE 1 + +struct dpni_cmd_clear_mac_filters { + /* from LSB: unicast:1, multicast:1 */ + u8 flags; +}; + +#define DPNI_DIST_MODE_SHIFT 0 +#define DPNI_DIST_MODE_SIZE 4 +#define DPNI_MISS_ACTION_SHIFT 4 +#define DPNI_MISS_ACTION_SIZE 4 + +struct dpni_cmd_set_rx_tc_dist { + /* cmd word 0 */ + __le16 dist_size; + u8 tc_id; + /* from LSB: dist_mode:4, miss_action:4 */ + u8 flags; + __le16 pad0; + __le16 default_flow_id; + /* cmd word 1..5 */ + __le64 pad1[5]; + /* cmd word 6 */ + __le64 
key_cfg_iova; +}; + +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at + * key_cfg_iova) + */ +struct dpni_mask_cfg { + u8 mask; + u8 offset; +}; + +#define DPNI_EFH_TYPE_SHIFT 0 +#define DPNI_EFH_TYPE_SIZE 4 +#define DPNI_EXTRACT_TYPE_SHIFT 0 +#define DPNI_EXTRACT_TYPE_SIZE 4 + +struct dpni_dist_extract { + /* word 0 */ + u8 prot; + /* EFH type stored in the 4 least significant bits */ + u8 efh_type; + u8 size; + u8 offset; + __le32 field; + /* word 1 */ + u8 hdr_index; + u8 constant; + u8 num_of_repeats; + u8 num_of_byte_masks; + /* Extraction type is stored in the 4 LSBs */ + u8 extract_type; + u8 pad[3]; + /* word 2 */ + struct dpni_mask_cfg masks[4]; +}; + +struct dpni_ext_set_rx_tc_dist { + /* extension word 0 */ + u8 num_extracts; + u8 pad[7]; + /* words 1..25 */ + struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; +}; + +struct dpni_cmd_get_queue { + u8 qtype; + u8 tc; + u8 index; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_STASH_CTRL_SHIFT 6 +#define DPNI_STASH_CTRL_SIZE 1 +#define DPNI_HOLD_ACTIVE_SHIFT 7 +#define DPNI_HOLD_ACTIVE_SIZE 1 + +struct dpni_rsp_get_queue { + /* response word 0 */ + __le64 pad0; + /* response word 1 */ + __le32 dest_id; + __le16 pad1; + u8 dest_prio; + /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */ + u8 flags; + /* response word 2 */ + __le64 flc; + /* response word 3 */ + __le64 user_context; + /* response word 4 */ + __le32 fqid; + __le16 qdbin; +}; + +struct dpni_cmd_set_queue { + /* cmd word 0 */ + u8 qtype; + u8 tc; + u8 index; + u8 options; + __le32 pad0; + /* cmd word 1 */ + __le32 dest_id; + __le16 pad1; + u8 dest_prio; + u8 flags; + /* cmd word 2 */ + __le64 flc; + /* cmd word 3 */ + __le64 user_context; +}; + +#define DPNI_DISCARD_ON_MISS_SHIFT 0 +#define DPNI_DISCARD_ON_MISS_SIZE 1 + +struct dpni_cmd_set_qos_table { + u32 pad; + u8 default_tc; + /* only the LSB */ + u8 discard_on_miss; + u16 pad1[21]; + u64 key_cfg_iova; +}; + +struct dpni_cmd_add_qos_entry { + u16 pad; + u8 tc_id; + u8 key_size; + u16 index; + u16 pad2; + u64 key_iova; + u64 mask_iova; +}; + +struct dpni_cmd_remove_qos_entry { + u8 pad1[3]; + u8 key_size; + u32 pad2; + u64 key_iova; + u64 mask_iova; +}; + +struct dpni_cmd_add_fs_entry { + /* cmd word 0 */ + u16 options; + u8 tc_id; + u8 key_size; + u16 index; + u16 flow_id; + /* cmd word 1 */ + u64 key_iova; + /* cmd word 2 */ + u64 mask_iova; + /* cmd word 3 */ + u64 flc; +}; + +struct dpni_cmd_remove_fs_entry { + /* cmd word 0 */ + __le16 pad0; + u8 tc_id; + u8 key_size; + __le32 pad1; + /* cmd word 1 */ + u64 key_iova; + /* cmd word 2 */ + u64 mask_iova; +}; + +struct dpni_cmd_set_taildrop { + /* cmd word 0 */ + u8 congestion_point; + u8 qtype; + u8 tc; + u8 index; + __le32 pad0; + /* cmd word 1 */ + /* Only least significant bit is relevant */ + u8 enable; + u8 pad1; + u8 units; + u8 pad2; + __le32 threshold; +}; + +struct dpni_cmd_get_taildrop { + u8 congestion_point; + u8 qtype; + u8 tc; + u8 index; +}; + +struct dpni_rsp_get_taildrop { + /* cmd word 0 */ + __le64 pad0; + /* cmd word 1 */ + /* only least significant bit is relevant */ + u8 enable; + u8 pad1; + u8 units; + u8 pad2; + __le32 threshold; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_CONG_UNITS_SHIFT 4 +#define DPNI_CONG_UNITS_SIZE 2 + +struct dpni_cmd_set_congestion_notification { + /* cmd word 0 */ + u8 qtype; + u8 tc; + u8 pad[6]; + /* cmd word 1 */ + u32 dest_id; + u16 notification_mode; + u8 dest_priority; + /* from LSB: 
dest_type: 4 units:2 */ + u8 type_units; + /* cmd word 2 */ + u64 message_iova; + /* cmd word 3 */ + u64 message_ctx; + /* cmd word 4 */ + u32 threshold_entry; + u32 threshold_exit; +}; + +struct dpni_cmd_get_congestion_notification { + /* cmd word 0 */ + u8 qtype; + u8 tc; +}; + +struct dpni_rsp_get_congestion_notification { + /* cmd word 0 */ + u64 pad; + /* cmd word 1 */ + u32 dest_id; + u16 notification_mode; + u8 dest_priority; + /* from LSB: dest_type: 4 units:2 */ + u8 type_units; + /* cmd word 2 */ + u64 message_iova; + /* cmd word 3 */ + u64 message_ctx; + /* cmd word 4 */ + u32 threshold_entry; + u32 threshold_exit; +}; +#endif /* _FSL_DPNI_CMD_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c @@ -0,0 +1,1903 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+#include "../../fsl-mc/include/mc-sys.h"
+#include "../../fsl-mc/include/mc-cmd.h"
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+/**
+ * dpni_prepare_key_cfg() - Prepare extract parameters for the key generator
+ * @cfg: Defines a full Key Generation profile (rule)
+ * @key_cfg_buf: Zeroed 256-byte memory buffer to fill; the caller maps it
+ *	to DMA afterwards
+ *
+ * This function has to be called before the following functions:
+ *	- dpni_set_rx_tc_dist()
+ *	- dpni_set_qos_table()
+ */
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
+{
+	int i, j;
+	struct dpni_ext_set_rx_tc_dist *dpni_ext;
+	struct dpni_dist_extract *extr;
+
+	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
+		return -EINVAL;
+
+	dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
+	dpni_ext->num_extracts = cfg->num_extracts;
+
+	for (i = 0; i < cfg->num_extracts; i++) {
+		extr = &dpni_ext->extracts[i];
+
+		switch (cfg->extracts[i].type) {
+		case DPKG_EXTRACT_FROM_HDR:
+			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
+			dpni_set_field(extr->efh_type, EFH_TYPE,
+				       cfg->extracts[i].extract.from_hdr.type);
+			extr->size = cfg->extracts[i].extract.from_hdr.size;
+			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
+			extr->field = cpu_to_le32(
+				cfg->extracts[i].extract.from_hdr.field);
+			extr->hdr_index =
+				cfg->extracts[i].extract.from_hdr.hdr_index;
+			break;
+		case DPKG_EXTRACT_FROM_DATA:
+			extr->size = cfg->extracts[i].extract.from_data.size;
+			extr->offset =
+				cfg->extracts[i].extract.from_data.offset;
+			break;
+		case DPKG_EXTRACT_FROM_PARSE:
+			extr->size = cfg->extracts[i].extract.from_parse.size;
+			extr->offset =
+				cfg->extracts[i].extract.from_parse.offset;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
+		dpni_set_field(extr->extract_type, EXTRACT_TYPE,
+			       cfg->extracts[i].type);
+
+		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
+			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
+			extr->masks[j].offset =
+				cfg->extracts[i].masks[j].offset;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpni_id: DPNI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
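+ *
+ * Example (illustrative sketch, not part of this patch; 'dpni_id' is
+ * whatever ID the DPL or dpni_create() assigned):
+ *
+ *	u16 token;
+ *	int err;
+ *
+ *	err = dpni_open(mc_io, 0, dpni_id, &token);
+ *	if (err)
+ *		return err;
+ *	... issue dpni_*() commands using 'token' ...
+ *	dpni_close(mc_io, 0, token);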
+ */ +int dpni_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpni_id, + u16 *token) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_open *cmd_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpni_cmd_open *)cmd.params; + cmd_params->dpni_id = cpu_to_le32(dpni_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpni_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_pools() - Set buffer pools configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Buffer pools configuration + * + * mandatory for DPNI operation + * warning:Allowed only when DPNI is disabled + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_pools(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_pools_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_pools *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_pools *)cmd.params; + cmd_params->num_dpbp = cfg->num_dpbp; + for (i = 0; i < DPNI_MAX_DPBP; i++) { + cmd_params->pool[i].dpbp_id = + cpu_to_le16(cfg->pools[i].dpbp_id); + cmd_params->pool[i].priority_mask = + cfg->pools[i].priority_mask; + cmd_params->buffer_size[i] = + cpu_to_le16(cfg->pools[i].buffer_size); + cmd_params->backup_pool_mask |= + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i); + } + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_enable() - Enable the DPNI, allow sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_disable() - Disable the DPNI, stop sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. 
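+ *
+ * Example (illustrative sketch): commands documented as allowed only while
+ * the DPNI is disabled, such as dpni_set_pools() or dpni_set_buffer_layout(),
+ * are typically bracketed like:
+ *
+ *	err = dpni_disable(mc_io, 0, token);
+ *	if (!err)
+ *		err = dpni_set_pools(mc_io, 0, token, &pools_cfg);
+ *	if (!err)
+ *		err = dpni_enable(mc_io, 0, token);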
+ */
+int dpni_disable(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+		    u32 cmd_flags,
+		    u16 token,
+		    int *en)
+{
+	struct mc_command cmd = { 0 };
+	struct dpni_rsp_is_enabled *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
+	*en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+	return 0;
+}
+
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset(struct fsl_mc_io *mc_io,
+	       u32 cmd_flags,
+	       u16 token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state: enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, no causes will
+ * cause an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u8 en)
+{
+	struct mc_command cmd = { 0 };
+	struct dpni_cmd_set_irq_enable *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
+	dpni_set_field(cmd_params->enable, ENABLE, en);
+	cmd_params->irq_index = irq_index;
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
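+ *
+ * Example (illustrative sketch), pairing with dpni_set_irq_enable() to
+ * save and restore the overall interrupt state around quiescent work:
+ *
+ *	u8 saved;
+ *
+ *	err = dpni_get_irq_enable(mc_io, 0, token, irq_index, &saved);
+ *	if (!err)
+ *		err = dpni_set_irq_enable(mc_io, 0, token, irq_index, 0);
+ *	... quiescent work ...
+ *	err = dpni_set_irq_enable(mc_io, 0, token, irq_index, saved);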
+ */ +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_enable *cmd_params; + struct dpni_rsp_get_irq_enable *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_mask *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_mask() - Get interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: Returned event mask to trigger interrupt + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_mask *cmd_params; + struct dpni_rsp_get_irq_mask *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params; + *mask = le32_to_cpu(rsp_params->mask); + + return 0; +} + +/** + * dpni_get_irq_status() - Get the current status of any pending interrupts. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. 
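+ *
+ * Example (illustrative sketch): an interrupt handler typically reads the
+ * pending causes, handles them, then acknowledges them through the
+ * write-one-to-clear dpni_clear_irq_status():
+ *
+ *	u32 status = 0;
+ *
+ *	err = dpni_get_irq_status(mc_io, 0, token, irq_index, &status);
+ *	if (!err && status)
+ *		err = dpni_clear_irq_status(mc_io, 0, token, irq_index,
+ *					    status);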
+ */ +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_status *cmd_params; + struct dpni_rsp_get_irq_status *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(*status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params; + *status = le32_to_cpu(rsp_params->status); + + return 0; +} + +/** + * dpni_clear_irq_status() - Clear a pending interrupt's status + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: bits to clear (W1C) - one bit per cause: + * 0 = don't change + * 1 = clear status bit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_clear_irq_status *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params; + cmd_params->irq_index = irq_index; + cmd_params->status = cpu_to_le32(status); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_attributes() - Retrieve DPNI attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @attr: Object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_attr *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_attr *)cmd.params; + attr->options = le32_to_cpu(rsp_params->options); + attr->num_queues = rsp_params->num_queues; + attr->num_tcs = rsp_params->num_tcs; + attr->mac_filter_entries = rsp_params->mac_filter_entries; + attr->vlan_filter_entries = rsp_params->vlan_filter_entries; + attr->qos_entries = rsp_params->qos_entries; + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries); + attr->qos_key_size = rsp_params->qos_key_size; + attr->fs_key_size = rsp_params->fs_key_size; + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version); + + return 0; +} + +/** + * dpni_set_errors_behavior() - Set errors behavior + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Errors configuration + * + * this function may be called numerous times with different + * error masks + * + * Return: '0' on Success; Error code otherwise. 
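+ *
+ * Example (illustrative sketch; the field names follow struct
+ * dpni_error_cfg as used below, and DPNI_ERROR_ACTION_DISCARD is assumed
+ * to be one of the action values declared in dpni.h):
+ *
+ *	struct dpni_error_cfg err_cfg = {
+ *		.errors = ...mask of frame errors to act on...,
+ *		.error_action = DPNI_ERROR_ACTION_DISCARD,
+ *		.set_frame_annotation = 1,
+ *	};
+ *
+ *	err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);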
+ */ +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_error_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_errors_behavior *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params; + cmd_params->errors = cpu_to_le32(cfg->errors); + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action); + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_buffer_layout() - Retrieve buffer layout attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to retrieve configuration for + * @layout: Returns buffer layout attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_buffer_layout *cmd_params; + struct dpni_rsp_get_buffer_layout *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params; + layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS); + layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR); + layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS); + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size); + layout->data_align = le16_to_cpu(rsp_params->data_align); + layout->data_head_room = le16_to_cpu(rsp_params->head_room); + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room); + + return 0; +} + +/** + * dpni_set_buffer_layout() - Set buffer layout configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue this configuration applies to + * @layout: Buffer layout configuration + * + * Return: '0' on Success; Error code otherwise. 
+ * + * @warning Allowed only when DPNI is disabled + */ +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_buffer_layout *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->options = cpu_to_le16(layout->options); + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp); + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result); + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status); + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size); + cmd_params->data_align = cpu_to_le16(layout->data_align); + cmd_params->head_room = cpu_to_le16(layout->data_head_room); + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_offload() - Set DPNI offload configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: Type of DPNI offload + * @config: Offload configuration. + * For checksum offloads, non-zero value enables the offload + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Allowed only when DPNI is disabled + */ + +int dpni_set_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 config) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_offload *cmd_params; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_offload *)cmd.params; + cmd_params->dpni_offload = type; + cmd_params->config = cpu_to_le32(config); + + return mc_send_command(mc_io, &cmd); +} + +int dpni_get_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 *config) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_offload *cmd_params; + struct dpni_rsp_get_offload *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_offload *)cmd.params; + cmd_params->dpni_offload = type; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_offload *)cmd.params; + *config = le32_to_cpu(rsp_params->config); + + return 0; +} + +/** + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used + * for enqueue operations + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to receive QDID for + * @qdid: Returned virtual QDID value that should be used as an argument + * in all enqueue operations + * + * Return: '0' on Success; Error code otherwise. 
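+ *
+ * Example (illustrative sketch; DPNI_QUEUE_TX is assumed to be a
+ * dpni_queue_type value from dpni.h): drivers usually fetch the Tx QDID
+ * once at probe time and reuse it for every enqueue operation:
+ *
+ *	u16 qdid;
+ *
+ *	err = dpni_get_qdid(mc_io, 0, token, DPNI_QUEUE_TX, &qdid);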
+ */ +int dpni_get_qdid(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u16 *qdid) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_qdid *cmd_params; + struct dpni_rsp_get_qdid *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params; + *qdid = le16_to_cpu(rsp_params->qdid); + + return 0; +} + +/** + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @data_offset: Tx data offset (from start of buffer) + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *data_offset) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_tx_data_offset *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params; + *data_offset = le16_to_cpu(rsp_params->data_offset); + + return 0; +} + +/** + * dpni_set_link_cfg() - set the link configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Link configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_link_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_link_cfg *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params; + cmd_params->rate = cpu_to_le32(cfg->rate); + cmd_params->options = cpu_to_le64(cfg->options); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_link_state() - Return the link state (either up or down) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @state: Returned link state; + * + * Return: '0' on Success; Error code otherwise. 
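+ *
+ * Example (illustrative sketch; the net_device handling is an assumption
+ * of the caller), mirroring the MC link state into the net stack:
+ *
+ *	struct dpni_link_state state = { 0 };
+ *
+ *	err = dpni_get_link_state(mc_io, 0, token, &state);
+ *	if (!err && state.up)
+ *		netif_carrier_on(net_dev);
+ *	else if (!err)
+ *		netif_carrier_off(net_dev);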
+ */ +int dpni_get_link_state(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_link_state *state) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_link_state *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params; + state->up = dpni_get_field(rsp_params->flags, LINK_STATE); + state->rate = le32_to_cpu(rsp_params->rate); + state->options = le64_to_cpu(rsp_params->options); + + return 0; +} + +/** + * dpni_set_tx_shaping() - Set the transmit shaping + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tx_shaper: tx shaping configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_tx_shaping_cfg *tx_shaper) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_tx_shaping *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params; + cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size); + cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_max_frame_length() - Set the maximum received frame length. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in + * bytes); frame is discarded if its + * length exceeds this value + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_max_frame_length *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params; + cmd_params->max_frame_length = cpu_to_le16(max_frame_length); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_max_frame_length() - Get the maximum received frame length. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in + * bytes); frame is discarded if its + * length exceeds this value + * + * Return: '0' on Success; Error code otherwise. 
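+ *
+ * Example (illustrative sketch): the MC frame length covers the Ethernet
+ * headers as well, so a .ndo_change_mtu handler would typically convert
+ * before calling the setter (VLAN_ETH_HLEN is the usual kernel constant):
+ *
+ *	err = dpni_set_max_frame_length(mc_io, 0, token,
+ *					(u16)(new_mtu + VLAN_ETH_HLEN));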
+ */ +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_max_frame_length *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params; + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length); + + return 0; +} + +/** + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_multicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_multicast_promisc() - Get multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_multicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_unicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_unicast_promisc() - Get unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. 
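+ *
+ * Example (illustrative sketch) of how a .ndo_set_rx_mode implementation
+ * maps the netdev flags onto these setters:
+ *
+ *	err = dpni_set_unicast_promisc(mc_io, 0, token,
+ *				       !!(net_dev->flags & IFF_PROMISC));
+ *	if (!err)
+ *		err = dpni_set_multicast_promisc(mc_io, 0, token,
+ *				!!(net_dev->flags & IFF_ALLMULTI));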
+ */ +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_unicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_primary_mac_addr() - Set the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to set as primary address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_primary_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_primary_mac_addr() - Get the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: Returned MAC address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_primary_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical + * port the DPNI is attached to + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address of the physical port, if any, otherwise 0 + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. 
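+ *
+ * Example (illustrative sketch; is_zero_ether_addr() is the standard
+ * etherdevice.h helper): probe code commonly falls back to the physical
+ * port address when no primary MAC was configured:
+ *
+ *	u8 mac[6];
+ *
+ *	err = dpni_get_primary_mac_addr(mc_io, 0, token, mac);
+ *	if (!err && is_zero_ether_addr(mac))
+ *		err = dpni_get_port_mac_addr(mc_io, 0, token, mac);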
+ */ +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_port_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_add_mac_addr() - Add MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to add + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_add_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_mac_addr() - Remove MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_remove_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @unicast: Set to '1' to clear unicast addresses + * @multicast: Set to '1' to clear multicast addresses + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. 
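+ *
+ * Example (illustrative sketch; 'ha' iterates the netdev address list):
+ * a set_rx_mode path often rebuilds the filter table from scratch:
+ *
+ *	err = dpni_clear_mac_filters(mc_io, 0, token, 1, 1);
+ *	netdev_for_each_mc_addr(ha, net_dev)
+ *		dpni_add_mac_addr(mc_io, 0, token, ha->addr);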
+ */
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   int unicast,
+			   int multicast)
+{
+	struct mc_command cmd = { 0 };
+	struct dpni_cmd_clear_mac_filters *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
+	dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
+	dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class distribution configuration
+ *
+ * Warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
+ * first to prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 tc_id,
+			const struct dpni_rx_tc_dist_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpni_cmd_set_rx_tc_dist *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
+	cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+	cmd_params->tc_id = tc_id;
+	dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
+	dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
+	cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
+	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * Warning: Before calling this function, call dpni_prepare_key_cfg() to
+ * prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       const struct dpni_qos_tbl_cfg *cfg)
+{
+	struct dpni_cmd_set_qos_table *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+	cmd_params->default_tc = cfg->default_tc;
+	cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+	dpni_set_field(cmd_params->discard_on_miss,
+		       ENABLE,
+		       cfg->discard_on_miss);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to add
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the QoS table where to insert the entry.
+ * Only relevant if MASKING is enabled for QoS classification on
+ * this DPNI; it is ignored for exact match.
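+ *
+ * Example (illustrative sketch): the rule's key and mask must sit in
+ * DMA-mapped memory referenced through dpni_rule_cfg ('key_dma' and
+ * 'mask_dma' are assumed dma_map_single() results):
+ *
+ *	struct dpni_rule_cfg rule = {
+ *		.key_iova = key_dma,
+ *		.mask_iova = mask_dma,
+ *		.key_size = key_len,
+ *	};
+ *
+ *	err = dpni_add_qos_entry(mc_io, 0, token, &rule, tc_id, index);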
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       const struct dpni_rule_cfg *cfg,
+		       u8 tc_id,
+		       u16 index)
+{
+	struct dpni_cmd_add_qos_entry *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+	cmd_params->tc_id = tc_id;
+	cmd_params->key_size = cfg->key_size;
+	cmd_params->index = cpu_to_le16(index);
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ *			 (to select a flow ID)
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @tc_id:	Traffic class selection (0-7)
+ * @index:	Location in the FS table where to insert the entry.
+ *		Only relevant if MASKING is enabled for FS
+ *		classification on this DPNI; it is ignored for exact match.
+ * @cfg:	Flow steering rule to add
+ * @action:	Action to be taken as result of a classification hit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 tc_id,
+		      u16 index,
+		      const struct dpni_rule_cfg *cfg,
+		      const struct dpni_fs_action_cfg *action)
+{
+	struct dpni_cmd_add_fs_entry *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+	cmd_params->tc_id = tc_id;
+	cmd_params->key_size = cfg->key_size;
+	cmd_params->index = cpu_to_le16(index);
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+	cmd_params->options = cpu_to_le16(action->options);
+	cmd_params->flow_id = cpu_to_le16(action->flow_id);
+	cmd_params->flc = cpu_to_le64(action->flc);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ *			    traffic class
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @tc_id:	Traffic class selection (0-7)
+ * @cfg:	Flow steering rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
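+ *
+ * An illustrative sketch: removing an entry previously installed with
+ * dpni_add_fs_entry() reuses the same saved rule configuration (here a
+ * hypothetical 'rule' kept by the caller):
+ *
+ *	err = dpni_remove_fs_entry(mc_io, 0, token, tc_id, &rule);
+ *	if (err)
+ *		return err;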
+ */ +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rule_cfg *cfg) +{ + struct dpni_cmd_remove_fs_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_congestion_notification() - Set traffic class congestion + * notification configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported + * @tc_id: Traffic class selection (0-7) + * @cfg: congestion notification configuration + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc_id, + const struct dpni_congestion_notification_cfg *cfg) +{ + struct dpni_cmd_set_congestion_notification *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_SET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc_id; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); + cmd_params->dest_priority = cfg->dest_cfg.priority; + dpni_set_field(cmd_params->type_units, DEST_TYPE, + cfg->dest_cfg.dest_type); + dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units); + cmd_params->message_iova = cpu_to_le64(cfg->message_iova); + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_congestion_notification() - Get traffic class congestion + * notification configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported + * @tc_id: Traffic class selection (0-7) + * @cfg: congestion notification configuration + * + * Return: '0' on Success; error code otherwise. 
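+ *
+ * A read-modify-write sketch (illustrative only; error checking is elided
+ * and 'new_threshold' is a hypothetical caller-chosen value):
+ *
+ *	struct dpni_congestion_notification_cfg cn_cfg;
+ *
+ *	dpni_get_congestion_notification(mc_io, 0, token, DPNI_QUEUE_RX,
+ *					 0, &cn_cfg);
+ *	cn_cfg.threshold_entry = new_threshold;
+ *	dpni_set_congestion_notification(mc_io, 0, token, DPNI_QUEUE_RX,
+ *					 0, &cn_cfg);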
+ */ +int dpni_get_congestion_notification( + struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc_id, + struct dpni_congestion_notification_cfg *cfg) +{ + struct dpni_rsp_get_congestion_notification *rsp_params; + struct dpni_cmd_get_congestion_notification *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_GET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc_id; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params; + cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS); + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); + cfg->message_iova = le64_to_cpu(rsp_params->message_iova); + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); + cfg->dest_cfg.priority = rsp_params->dest_priority; + cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units, + DEST_TYPE); + + return 0; +} + +/** + * dpni_set_queue() - Set queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported, although + * the command is ignored for Tx + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. Value must be in range 0 to NUM_QUEUES - 1 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what + * configuration options are set on the queue + * @queue: Queue structure + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + u8 options, + const struct dpni_queue *queue) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_queue *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + cmd_params->options = options; + cmd_params->dest_id = cpu_to_le32(queue->destination.id); + cmd_params->dest_prio = queue->destination.priority; + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type); + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control); + dpni_set_field(cmd_params->flags, HOLD_ACTIVE, + queue->destination.hold_active); + cmd_params->flc = cpu_to_le64(queue->flc.value); + cmd_params->user_context = cpu_to_le64(queue->user_context); + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_queue() - Get queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. 
Value must be in range 0 to NUM_QUEUES - 1 + * @queue: Queue configuration structure + * @qid: Queue identification + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_queue *queue, + struct dpni_queue_id *qid) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_queue *cmd_params; + struct dpni_rsp_get_queue *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_queue *)cmd.params; + queue->destination.id = le32_to_cpu(rsp_params->dest_id); + queue->destination.priority = rsp_params->dest_prio; + queue->destination.type = dpni_get_field(rsp_params->flags, + DEST_TYPE); + queue->flc.stash_control = dpni_get_field(rsp_params->flags, + STASH_CTRL); + queue->destination.hold_active = dpni_get_field(rsp_params->flags, + HOLD_ACTIVE); + queue->flc.value = le64_to_cpu(rsp_params->flc); + queue->user_context = le64_to_cpu(rsp_params->user_context); + qid->fqid = le32_to_cpu(rsp_params->fqid); + qid->qdbin = le16_to_cpu(rsp_params->qdbin); + + return 0; +} + +/** + * dpni_get_statistics() - Get DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @page: Selects the statistics page to retrieve, see + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2. + * @stat: Structure containing the statistics + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_statistics(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 page, + union dpni_statistics *stat) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_statistics *cmd_params; + struct dpni_rsp_get_statistics *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params; + cmd_params->page_number = page; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params; + for (i = 0; i < DPNI_STATISTICS_CNT; i++) + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]); + + return 0; +} + +/** + * dpni_reset_statistics() - Clears DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_reset_statistics(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_taildrop() - Set taildrop per queue or TC + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point + * @q_type: Queue type on which the taildrop is configured. 
+ *		Only Rx queues are supported for now
+ * @tc:	Traffic class to apply this taildrop to
+ * @q_index:	Index of the queue if the DPNI supports multiple queues for
+ *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop:	Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      enum dpni_congestion_point cg_point,
+		      enum dpni_queue_type q_type,
+		      u8 tc,
+		      u8 q_index,
+		      struct dpni_taildrop *taildrop)
+{
+	struct mc_command cmd = { 0 };
+	struct dpni_cmd_set_taildrop *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
+	cmd_params->congestion_point = cg_point;
+	cmd_params->qtype = q_type;
+	cmd_params->tc = tc;
+	cmd_params->index = q_index;
+	dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
+	cmd_params->units = taildrop->units;
+	cmd_params->threshold = cpu_to_le32(taildrop->threshold);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPNI object
+ * @cg_point:	Congestion point
+ * @q_type:	Queue type on which the taildrop is configured.
+ *		Only Rx queues are supported for now
+ * @tc:	Traffic class to apply this taildrop to
+ * @q_index:	Index of the queue if the DPNI supports multiple queues for
+ *		traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop:	Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      enum dpni_congestion_point cg_point,
+		      enum dpni_queue_type q_type,
+		      u8 tc,
+		      u8 q_index,
+		      struct dpni_taildrop *taildrop)
+{
+	struct mc_command cmd = { 0 };
+	struct dpni_cmd_get_taildrop *cmd_params;
+	struct dpni_rsp_get_taildrop *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
+	cmd_params->congestion_point = cg_point;
+	cmd_params->qtype = q_type;
+	cmd_params->tc = tc;
+	cmd_params->index = q_index;
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
+	taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
+	taildrop->units = rsp_params->units;
+	taildrop->threshold = le32_to_cpu(rsp_params->threshold);
+
+	return 0;
+}
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
@@ -0,0 +1,1053 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ *   names of any contributors may be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPNI_H
+#define __FSL_DPNI_H
+
+#include "dpkg.h"
+
+struct fsl_mc_io;
+
+/**
+ * Data Path Network Interface API
+ * Contains initialization APIs and runtime control APIs for DPNI
+ */
+
+/** General DPNI macros */
+
+/**
+ * Maximum number of traffic classes
+ */
+#define DPNI_MAX_TC				8
+/**
+ * Maximum number of buffer pools per DPNI
+ */
+#define DPNI_MAX_DPBP				8
+/**
+ * Maximum number of senders
+ */
+#define DPNI_MAX_SENDERS			8
+/**
+ * Maximum distribution size
+ */
+#define DPNI_MAX_DIST_SIZE			8
+
+/**
+ * All traffic classes considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TCS				(u8)(-1)
+/**
+ * All flows within traffic class considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TC_FLOWS			(u16)(-1)
+/**
+ * Generate new flow ID; see dpni_set_queue()
+ */
+#define DPNI_NEW_FLOW_ID			(u16)(-1)
+
+/**
+ * Tx traffic is always released to a buffer pool on transmit; there are no
+ * resources allocated to have the frames confirmed back to the source after
+ * transmission.
+ */
+#define DPNI_OPT_TX_FRM_RELEASE			0x000001
+/**
+ * Disables support for MAC address filtering for addresses other than primary
+ * MAC address. This affects both unicast and multicast. Promiscuous mode can
+ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
+ * is disabled, only traffic matching the primary MAC address will be accepted.
+ */
+#define DPNI_OPT_NO_MAC_FILTER			0x000002
+/**
+ * Allocate policers for this DPNI. They can be used to rate-limit traffic on
+ * a per traffic class (TC) basis.
+ */
+#define DPNI_OPT_HAS_POLICING			0x000004
+/**
+ * Congestion can be managed in several ways, allowing the buffer pool to
+ * deplete on ingress, taildrop on each queue or use congestion groups for sets
+ * of queues. If set, it configures a single congestion group across all TCs.
+ * If reset, a congestion group is allocated for each TC. Only relevant if the
+ * DPNI has multiple traffic classes.
+ */
+#define DPNI_OPT_SHARED_CONGESTION		0x000008
+/**
+ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
+ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
+ * variants. Setting this bit on these SoCs will trigger an error.
+ */ +#define DPNI_OPT_HAS_KEY_MASKING 0x000010 +/** + * Disables the flow steering table. + */ +#define DPNI_OPT_NO_FS 0x000020 + +int dpni_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpni_id, + u16 *token); + +int dpni_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * struct dpni_pools_cfg - Structure representing buffer pools configuration + * @num_dpbp: Number of DPBPs + * @pools: Array of buffer pools parameters; The number of valid entries + * must match 'num_dpbp' value + */ +struct dpni_pools_cfg { + u8 num_dpbp; + /** + * struct pools - Buffer pools parameters + * @dpbp_id: DPBP object ID + * @priority_mask: priorities served by DPBP + * @buffer_size: Buffer size + * @backup_pool: Backup pool + */ + struct { + u16 dpbp_id; + u8 priority_mask; + u16 buffer_size; + u8 backup_pool; + } pools[DPNI_MAX_DPBP]; +}; + +int dpni_set_pools(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_pools_cfg *cfg); + +int dpni_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpni_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpni_is_enabled(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en); + +int dpni_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * DPNI IRQ Index and Events + */ + +/** + * IRQ index + */ +#define DPNI_IRQ_INDEX 0 +/** + * IRQ event - indicates a change in link state + */ +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 + +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en); + +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en); + +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask); + +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask); + +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status); + +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status); + +/** + * struct dpni_attr - Structure representing DPNI attributes + * @options: Any combination of the following options: + * DPNI_OPT_TX_FRM_RELEASE + * DPNI_OPT_NO_MAC_FILTER + * DPNI_OPT_HAS_POLICING + * DPNI_OPT_SHARED_CONGESTION + * DPNI_OPT_HAS_KEY_MASKING + * DPNI_OPT_NO_FS + * @num_queues: Number of Tx and Rx queues used for traffic distribution. + * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI. + * @mac_filter_entries: Number of entries in the MAC address filtering table. + * @vlan_filter_entries: Number of entries in the VLAN address filtering table. + * @qos_entries: Number of entries in the QoS classification table. + * @fs_entries: Number of entries in the flow steering table. + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger + * than this when adding QoS entries will result in an error. + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a + * key larger than this when composing the hash + FS key will + * result in an error. + * @wriop_version: Version of WRIOP HW block. The 3 version values are stored + * on 6, 5, 5 bits respectively. 
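+ *
+ * Given the 6-5-5 bit packing, one plausible way to unpack the three
+ * version components (assuming the major component occupies the most
+ * significant bits) is:
+ *
+ *	u16 major = (attr.wriop_version >> 10) & 0x3F;
+ *	u16 minor = (attr.wriop_version >> 5) & 0x1F;
+ *	u16 revision = attr.wriop_version & 0x1F;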
+ */
+struct dpni_attr {
+	u32 options;
+	u8 num_queues;
+	u8 num_tcs;
+	u8 mac_filter_entries;
+	u8 vlan_filter_entries;
+	u8 qos_entries;
+	u16 fs_entries;
+	u8 qos_key_size;
+	u8 fs_key_size;
+	u16 wriop_version;
+};
+
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			struct dpni_attr *attr);
+
+/**
+ * DPNI errors
+ */
+
+/**
+ * Extract out of frame header error
+ */
+#define DPNI_ERROR_EOFHE	0x00020000
+/**
+ * Frame length error
+ */
+#define DPNI_ERROR_FLE		0x00002000
+/**
+ * Frame physical error
+ */
+#define DPNI_ERROR_FPE		0x00001000
+/**
+ * Parsing header error
+ */
+#define DPNI_ERROR_PHE		0x00000020
+/**
+ * Parser L3 checksum error
+ */
+#define DPNI_ERROR_L3CE		0x00000004
+/**
+ * Parser L4 checksum error
+ */
+#define DPNI_ERROR_L4CE		0x00000001
+
+/**
+ * enum dpni_error_action - Defines DPNI behavior for errors
+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
+ */
+enum dpni_error_action {
+	DPNI_ERROR_ACTION_DISCARD = 0,
+	DPNI_ERROR_ACTION_CONTINUE = 1,
+	DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
+/**
+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
+ * @errors: Errors mask; use 'DPNI_ERROR_' values
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
+ *			  status (FAS); relevant only for the non-discard action
+ */
+struct dpni_error_cfg {
+	u32 errors;
+	enum dpni_error_action error_action;
+	int set_frame_annotation;
+};
+
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     struct dpni_error_cfg *cfg);
+
+/**
+ * DPNI buffer layout modification options
+ */
+
+/**
+ * Select to modify the time-stamp setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP		0x00000001
+/**
+ * Select to modify the parser-result setting; not applicable for Tx
+ */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT	0x00000002
+/**
+ * Select to modify the frame-status setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS	0x00000004
+/**
+ * Select to modify the private-data-size setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE	0x00000008
+/**
+ * Select to modify the data-alignment setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN		0x00000010
+/**
+ * Select to modify the data-head-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM	0x00000020
+/**
+ * Select to modify the data-tail-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM	0x00000040
+
+/**
+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
+ * @options: Flags representing the suggested modifications to the buffer
+ *	     layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
+ */
+struct dpni_buffer_layout {
+	u32 options;
+	int pass_timestamp;
+	int pass_parser_result;
+	int pass_frame_status;
+	u16 private_data_size;
+	u16 data_align;
+	u16 data_head_room;
+	u16 data_tail_room;
+};
+
+/**
+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
+ * @DPNI_QUEUE_RX: Rx queue
+ * @DPNI_QUEUE_TX: Tx queue
+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
+ * @DPNI_QUEUE_RX_ERR: Rx error queue
+ */
+enum dpni_queue_type {
+	DPNI_QUEUE_RX,
+	DPNI_QUEUE_TX,
+	DPNI_QUEUE_TX_CONFIRM,
+	DPNI_QUEUE_RX_ERR,
+};
+
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   enum dpni_queue_type qtype,
+			   struct dpni_buffer_layout *layout);
+
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   enum dpni_queue_type qtype,
+			   const struct dpni_buffer_layout *layout);
+
+/**
+ * enum dpni_offload - Identifies a type of offload targeted by the command
+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
+ */
+enum dpni_offload {
+	DPNI_OFF_RX_L3_CSUM,
+	DPNI_OFF_RX_L4_CSUM,
+	DPNI_OFF_TX_L3_CSUM,
+	DPNI_OFF_TX_L4_CSUM,
+};
+
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     enum dpni_offload type,
+		     u32 config);
+
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     enum dpni_offload type,
+		     u32 *config);
+
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+		  u32 cmd_flags,
+		  u16 token,
+		  enum dpni_queue_type qtype,
+		  u16 *qdid);
+
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+			    u32 cmd_flags,
+			    u16 token,
+			    u16 *data_offset);
+
+#define DPNI_STATISTICS_CNT		7
+
+union dpni_statistics {
+	/**
+	 * struct page_0 - Page_0 statistics structure
+	 * @ingress_all_frames: Ingress frame count
+	 * @ingress_all_bytes: Ingress byte count
+	 * @ingress_multicast_frames: Ingress multicast frame count
+	 * @ingress_multicast_bytes: Ingress multicast byte count
+	 * @ingress_broadcast_frames: Ingress broadcast frame count
+	 * @ingress_broadcast_bytes: Ingress broadcast byte count
+	 */
+	struct {
+		u64 ingress_all_frames;
+		u64 ingress_all_bytes;
+		u64 ingress_multicast_frames;
+		u64 ingress_multicast_bytes;
+		u64 ingress_broadcast_frames;
+		u64 ingress_broadcast_bytes;
+	} page_0;
+	/**
+	 * struct page_1 - Page_1 statistics structure
+	 * @egress_all_frames: Egress frame count
+	 * @egress_all_bytes: Egress byte count
+	 * @egress_multicast_frames: Egress multicast frame count
+	 * @egress_multicast_bytes: Egress multicast byte count
+	 * @egress_broadcast_frames: Egress broadcast frame count
+	 * @egress_broadcast_bytes: Egress broadcast byte count
+	 */
+	struct {
+		u64 egress_all_frames;
+		u64 egress_all_bytes;
+		u64 egress_multicast_frames;
+		u64 egress_multicast_bytes;
+		u64 egress_broadcast_frames;
+		u64 egress_broadcast_bytes;
+	} page_1;
+	/**
+	 * struct page_2 - Page_2 statistics structure
+	 * @ingress_filtered_frames: Ingress filtered frame count
+	 * @ingress_discarded_frames: Ingress discarded frame count
+	 * @ingress_nobuffer_discards: Ingress discarded frame count
+	 *			       due to lack of buffers
+	 * @egress_discarded_frames: Egress discarded frame count
+	 * @egress_confirmed_frames: Egress confirmed frame count
+	 */
+	struct {
+		u64 ingress_filtered_frames;
+		u64 ingress_discarded_frames;
+		u64 ingress_nobuffer_discards;
+		u64 egress_discarded_frames;
+		u64 egress_confirmed_frames;
+	} page_2;
+	/**
+	 * struct raw - raw statistics structure
+	 */
+	struct {
+		u64 counter[DPNI_STATISTICS_CNT];
+	} raw;
+};
+
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 page,
+			union dpni_statistics *stat);
+
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token);
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPNI_LINK_OPT_AUTONEG		0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPNI_LINK_OPT_HALF_DUPLEX	0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPNI_LINK_OPT_PAUSE		0x0000000000000004ULL
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPNI_LINK_OPT_ASYM_PAUSE	0x0000000000000008ULL
+/**
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE		0x0000000000000010ULL
+
+/**
+ * struct dpni_link_cfg - Structure representing DPNI link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values
+ */
+struct dpni_link_cfg {
+	u32 rate;
+	u64 options;
+};
+
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      const struct dpni_link_cfg *cfg);
+
+/**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values
+ * @up: Link state; '0' for down, '1' for up
+ */
+struct dpni_link_state {
+	u32 rate;
+	u64 options;
+	int up;
+};
+
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			struct dpni_link_state *state);
+
+/**
+ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping
+ *				configuration
+ * @rate_limit: rate in Mbps
+ * @max_burst_size: burst size in bytes (up to 64KB)
+ */
+struct dpni_tx_shaping_cfg {
+	u32 rate_limit;
+	u16 max_burst_size;
+};
+
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			const struct dpni_tx_shaping_cfg *tx_shaper);
+
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 max_frame_length);
+
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 *max_frame_length);
+
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+			       u32 cmd_flags,
+			       u16 token,
+			       int en);
+
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+			       u32 cmd_flags,
+			       u16 token,
+			       int *en);
+
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     int en);
+
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     int *en);
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      const u8 mac_addr[6]);
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u8 mac_addr[6]);
+
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u8 mac_addr[6]);
+
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      const u8 mac_addr[6]);
+
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 const u8 mac_addr[6]);
+
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   int unicast,
+			   int multicast);
+
+/**
+ * enum dpni_dist_mode - DPNI distribution mode
+ * @DPNI_DIST_MODE_NONE: No distribution
+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
+ *			 the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
+ *		       the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
+ */
+enum dpni_dist_mode {
+	DPNI_DIST_MODE_NONE = 0,
+	DPNI_DIST_MODE_HASH = 1,
+	DPNI_DIST_MODE_FS = 2
+};
+
+/**
+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
+ */
+enum dpni_fs_miss_action {
+	DPNI_FS_MISS_DROP = 0,
+	DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
+	DPNI_FS_MISS_HASH = 2
+};
+
+/**
+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
+ * @miss_action: Miss action selection
+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ */
+struct dpni_fs_tbl_cfg {
+	enum dpni_fs_miss_action miss_action;
+	u16 default_flow_id;
+};
+
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+			 u8 *key_cfg_buf);
+
+/**
+ * struct dpni_qos_tbl_cfg - Structure representing QoS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *		  key extractions to be used as the QoS criteria by calling
+ *		  dpni_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ *		     '0' to use the 'default_tc' in such cases
+ * @default_tc: Used in case of no-match and 'discard_on_miss' = 0
+ */
+struct dpni_qos_tbl_cfg {
+	u64 key_cfg_iova;
+	int discard_on_miss;
+	u8 default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       const struct dpni_qos_tbl_cfg *cfg);
+
+/**
+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
+ * @dist_size: Set the distribution size;
+ *	       supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
+ *	       112,128,192,224,256,384,448,512,768,896,1024
+ * @dist_mode: Distribution mode
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *		  the extractions to be used for the distribution key by calling
+ *		  dpni_prepare_key_cfg(); relevant only when
+ *		  'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
+ * @fs_cfg: Flow Steering table configuration; only relevant if
+ *	    'dist_mode = DPNI_DIST_MODE_FS'
+ */
+struct dpni_rx_tc_dist_cfg {
+	u16 dist_size;
+	enum dpni_dist_mode dist_mode;
+	u64 key_cfg_iova;
+	struct dpni_fs_tbl_cfg fs_cfg;
+};
+
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 tc_id,
+			const struct dpni_rx_tc_dist_cfg *cfg);
+
+/**
+ * enum dpni_dest - DPNI destination types
+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
+ *		    does not generate FQDAN notifications; user is expected to
+ *		    dequeue from the queue based on polling or other user-defined
+ *		    method
+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ *		    notifications to the specified DPIO; user is expected to
+ *		    dequeue from the queue only after notification is received
+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ *		     FQDAN notifications, but is connected to the specified DPCON
+ *		     object; user is expected to dequeue from the DPCON channel
+ */
+enum dpni_dest {
+	DPNI_DEST_NONE = 0,
+	DPNI_DEST_DPIO = 1,
+	DPNI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpni_queue - Queue structure
+ * @user_context: User data, presented to the user along with any frames from
+ *		  this queue. Not relevant for Tx queues.
+ */
+struct dpni_queue {
+/**
+ * struct destination - Destination structure
+ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ *	Identifies either a DPIO or a DPCON object. Not relevant for
+ *	Tx queues.
+ * @type: May be one of the following:
+ *	  0 - No destination, queue can be manually queried, but will not
+ *	      push traffic or notifications to a DPIO;
+ *	  1 - The destination is a DPIO. When traffic becomes available in
+ *	      the queue a FQDAN (FQ data available notification) will be
+ *	      generated to selected DPIO;
+ *	  2 - The destination is a DPCON.
The queue is associated with a + * DPCON object for the purpose of scheduling between multiple + * queues. The DPCON may be independently configured to + * generate notifications. Not relevant for Tx queues. + * @hold_active: Hold active, maintains a queue scheduled for longer + * in a DPIO during dequeue to reduce spread of traffic. + * Only relevant if queues are not affined to a single DPIO. + */ + struct { + u16 id; + enum dpni_dest type; + char hold_active; + u8 priority; + } destination; + u64 user_context; + struct { + u64 value; + char stash_control; + } flc; +}; + +/** + * struct dpni_queue_id - Queue identification, used for enqueue commands + * or queue control + * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant + * for Tx queues. + */ +struct dpni_queue_id { + u32 fqid; + u16 qdbin; +}; + +/** + * Set User Context + */ +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 +#define DPNI_QUEUE_OPT_DEST 0x00000002 +#define DPNI_QUEUE_OPT_FLC 0x00000004 +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008 + +int dpni_set_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + u8 options, + const struct dpni_queue *queue); + +int dpni_get_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_queue *queue, + struct dpni_queue_id *qid); + +/** + * enum dpni_congestion_unit - DPNI congestion units + * @DPNI_CONGESTION_UNIT_BYTES: bytes units + * @DPNI_CONGESTION_UNIT_FRAMES: frames units + */ +enum dpni_congestion_unit { + DPNI_CONGESTION_UNIT_BYTES = 0, + DPNI_CONGESTION_UNIT_FRAMES +}; + +/** + * enum dpni_congestion_point - Structure representing congestion point + * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and + * QUEUE_INDEX + * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to + * define the DPNI this can be either per TC (default) or per + * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create). + * QUEUE_INDEX is ignored if this type is used. 
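+ *
+ * For instance, a per-queue taildrop of 64 KB on the first Rx queue of
+ * TC 0 could be set up like this sketch (illustrative only; see
+ * dpni_set_taildrop() below):
+ *
+ *	struct dpni_taildrop td = {
+ *		.enable = 1,
+ *		.units = DPNI_CONGESTION_UNIT_BYTES,
+ *		.threshold = 64 * 1024,
+ *	};
+ *
+ *	err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
+ *				DPNI_QUEUE_RX, 0, 0, &td);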
+ */
+enum dpni_congestion_point {
+	DPNI_CP_QUEUE,
+	DPNI_CP_GROUP,
+};
+
+/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ *	      are 0-1 or 0-7, depending on the number of priorities in that
+ *	      channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+	enum dpni_dest dest_type;
+	int dest_id;
+	u8 priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER	0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT		0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected
+ */
+#define DPNI_CONG_OPT_COHERENT_WRITE		0x00000004
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER	0x00000008
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT	0x00000010
+/**
+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
+ */
+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED	0x00000020
+/**
+ * This congestion will trigger flow control or priority flow control.
+ * This will have effect only if flow control is enabled with
+ * dpni_set_link_cfg().
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL	0x00000040
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ *					     configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state.
+ *		     Set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ *		  must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_'
+ *		  is contained in 'options'
+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_' values
+ */
+struct dpni_congestion_notification_cfg {
+	enum dpni_congestion_unit units;
+	u32 threshold_entry;
+	u32 threshold_exit;
+	u64 message_ctx;
+	u64 message_iova;
+	struct dpni_dest_cfg dest_cfg;
+	u16 notification_mode;
+};
+
+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
+				     u32 cmd_flags,
+				     u16 token,
+				     enum dpni_queue_type qtype,
+				     u8 tc_id,
+				     const struct dpni_congestion_notification_cfg *cfg);
+
+int dpni_get_congestion_notification(
+			struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			enum dpni_queue_type qtype,
+			u8 tc_id,
+			struct dpni_congestion_notification_cfg *cfg);
+
+/**
+ * struct dpni_taildrop - Structure representing the taildrop
+ * @enable: Indicates whether the taildrop is active or not.
+ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
+ *	   byte units; this field is ignored and assumed = 0 if
+ *	   CONGESTION_POINT is 0.
+ * @threshold: Threshold value, in units identified by UNITS field. Value 0
+ *	       cannot be used as a valid taildrop threshold; THRESHOLD must
+ *	       be > 0 if the taildrop is enabled.
+ */
+struct dpni_taildrop {
+	char enable;
+	enum dpni_congestion_unit units;
+	u32 threshold;
+};
+
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      enum dpni_congestion_point cg_point,
+		      enum dpni_queue_type q_type,
+		      u8 tc,
+		      u8 q_index,
+		      struct dpni_taildrop *taildrop);
+
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      enum dpni_congestion_point cg_point,
+		      enum dpni_queue_type q_type,
+		      u8 tc,
+		      u8 q_index,
+		      struct dpni_taildrop *taildrop);
+
+/**
+ * struct dpni_rule_cfg - Rule configuration for table lookup
+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
+ * @key_size: key and mask size (in bytes)
+ */
+struct dpni_rule_cfg {
+	u64 key_iova;
+	u64 mask_iova;
+	u8 key_size;
+};
+
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       const struct dpni_rule_cfg *cfg,
+		       u8 tc_id,
+		       u16 index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token);
+
+/**
+ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD		0x1
+
+/**
+ * Set FLC value. If set, the flc member of struct dpni_fs_action_cfg is used
+ * to override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC		0x2
+
+/**
+ * Indicates whether the 6 least significant bits of FLC are used for stash
+ * control. If set, the 6 least significant bits in value are interpreted as
+ * follows:
+ *     - bits 0-1: indicates the number of 64 byte units of context that are
+ *	 stashed. FLC value is interpreted as a memory address in this case,
+ *	 excluding the 6 LS bits.
+ *     - bits 2-3: indicates the number of 64 byte units of frame annotation
+ *	 to be stashed. Annotation is placed at FD[ADDR].
+ *     - bits 4-5: indicates the number of 64 byte units of frame data to be
+ *	 stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL	0x4
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc: FLC value for traffic matching this rule. Please check the Frame
+ *	 Descriptor section in the hardware documentation for more information.
+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
+ *	     values are in range 0 to num_queues - 1.
+ * @options: Any combination of DPNI_FS_OPT_ values.
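+ *
+ * A sketch of steering matching traffic to Rx queue 3 (illustrative only;
+ * 'rule' is a previously prepared struct dpni_rule_cfg):
+ *
+ *	struct dpni_fs_action_cfg action = {
+ *		.flow_id = 3,
+ *		.options = 0,
+ *	};
+ *
+ *	err = dpni_add_fs_entry(mc_io, 0, token, tc_id, 0, &rule, &action);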
+ */ +struct dpni_fs_action_cfg { + u64 flc; + u16 flow_id; + u16 options; +}; + +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + u16 index, + const struct dpni_rule_cfg *cfg, + const struct dpni_fs_action_cfg *action); + +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rule_cfg *cfg); + +#endif /* __FSL_DPNI_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/net.h @@ -0,0 +1,480 @@ +/* Copyright 2013-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef __FSL_NET_H +#define __FSL_NET_H + +#define LAST_HDR_INDEX 0xFFFFFFFF + +/*****************************************************************************/ +/* Protocol fields */ +/*****************************************************************************/ + +/************************* Ethernet fields *********************************/ +#define NH_FLD_ETH_DA (1) +#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) +#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) +#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) +#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) +#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) +#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) + +#define NH_FLD_ETH_ADDR_SIZE 6 + +/*************************** VLAN fields ***********************************/ +#define NH_FLD_VLAN_VPRI (1) +#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) +#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) +#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) +#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) +#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) + +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ + NH_FLD_VLAN_CFI | \ + NH_FLD_VLAN_VID) + +/************************ IP (generic) fields ******************************/ +#define NH_FLD_IP_VER (1) +#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) +#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) +#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) +#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) +#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) +#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) +#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) +#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) + +#define NH_FLD_IP_PROTO_SIZE 1 + +/***************************** IPV4 fields *********************************/ +#define NH_FLD_IPV4_VER (1) +#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) +#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) +#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) +#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4) +#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) +#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) +#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) +#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) +#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) +#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) +#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) +#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) +#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) +#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) +#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) + +#define NH_FLD_IPV4_ADDR_SIZE 4 +#define NH_FLD_IPV4_PROTO_SIZE 1 + +/***************************** IPV6 fields *********************************/ +#define NH_FLD_IPV6_VER (1) +#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) +#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) +#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) +#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) +#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) +#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) +#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) +#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) + +#define NH_FLD_IPV6_ADDR_SIZE 16 +#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 + +/***************************** ICMP fields *********************************/ +#define NH_FLD_ICMP_TYPE (1) +#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) +#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) +#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) +#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) +#define 
NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) + +#define NH_FLD_ICMP_CODE_SIZE 1 +#define NH_FLD_ICMP_TYPE_SIZE 1 + +/***************************** IGMP fields *********************************/ +#define NH_FLD_IGMP_VERSION (1) +#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) +#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) +#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) +#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) + +/***************************** TCP fields **********************************/ +#define NH_FLD_TCP_PORT_SRC (1) +#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) +#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) +#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) +#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) +#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) +#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) +#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) +#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) +#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) +#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) +#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) + +#define NH_FLD_TCP_PORT_SIZE 2 + +/***************************** UDP fields **********************************/ +#define NH_FLD_UDP_PORT_SRC (1) +#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) +#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) +#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) +#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) + +#define NH_FLD_UDP_PORT_SIZE 2 + +/*************************** UDP-lite fields *******************************/ +#define NH_FLD_UDP_LITE_PORT_SRC (1) +#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) +#define NH_FLD_UDP_LITE_ALL_FIELDS \ + ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) + +#define NH_FLD_UDP_LITE_PORT_SIZE 2 + +/*************************** UDP-encap-ESP fields **************************/ +#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) +#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) +#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) +#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) +#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ + ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) + +#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 +#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 + +/***************************** SCTP fields *********************************/ +#define NH_FLD_SCTP_PORT_SRC (1) +#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) +#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) +#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) +#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) + +#define NH_FLD_SCTP_PORT_SIZE 2 + +/***************************** DCCP fields *********************************/ +#define NH_FLD_DCCP_PORT_SRC (1) +#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) +#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) + +#define NH_FLD_DCCP_PORT_SIZE 2 + +/***************************** IPHC fields *********************************/ +#define NH_FLD_IPHC_CID (1) +#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) +#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) +#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) +#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) +#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 
1) + +/***************************** SCTP fields *********************************/ +#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) +#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) +#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ + ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) + +/*************************** L2TPV2 fields *********************************/ +#define NH_FLD_L2TPV2_TYPE_BIT (1) +#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) +#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) +#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) +#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) +#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) +#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) +#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) +#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) +#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) +#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) +#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) +#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) +#define NH_FLD_L2TPV2_ALL_FIELDS \ + ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1) + +/*************************** L2TPV3 fields *********************************/ +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) +#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) +#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) +#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) +#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) +#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ + ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) + +#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) +#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) +#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) +#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ + ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) + +/**************************** PPP fields ***********************************/ +#define NH_FLD_PPP_PID (1) +#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) +#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) + +/************************** PPPoE fields ***********************************/ +#define NH_FLD_PPPOE_VER (1) +#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1) +#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) +#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) +#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) +#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) +#define 
NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) +#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) + +/************************* PPP-Mux fields **********************************/ +#define NH_FLD_PPPMUX_PID (1) +#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) +#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) +#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) + +/*********************** PPP-Mux sub-frame fields **************************/ +#define NH_FLD_PPPMUX_SUBFRM_PFF (1) +#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) +#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) +#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) +#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ + ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) + +/*************************** LLC fields ************************************/ +#define NH_FLD_LLC_DSAP (1) +#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) +#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) +#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) + +/*************************** NLPID fields **********************************/ +#define NH_FLD_NLPID_NLPID (1) +#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) + +/*************************** SNAP fields ***********************************/ +#define NH_FLD_SNAP_OUI (1) +#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) +#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) + +/*************************** LLC SNAP fields *******************************/ +#define NH_FLD_LLC_SNAP_TYPE (1) +#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) + +/*************************** ARP fields ************************************/ +#define NH_FLD_ARP_HTYPE (1) +#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) +#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) +#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) +#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) +#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5) +#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) +#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) +#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) +#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) + +/*************************** RFC2684 fields ********************************/ +#define NH_FLD_RFC2684_LLC (1) +#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) +#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) +#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) +#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) +#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) +#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) + +/*************************** User defined fields ***************************/ +#define NH_FLD_USER_DEFINED_SRCPORT (1) +#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) +#define NH_FLD_USER_DEFINED_ALL_FIELDS \ + ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) + +/*************************** Payload fields ********************************/ +#define NH_FLD_PAYLOAD_BUFFER (1) +#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) +#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) +#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) +#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) +#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) +#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) + +/*************************** GRE fields ************************************/ +#define NH_FLD_GRE_TYPE (1) +#define NH_FLD_GRE_ALL_FIELDS 
((NH_FLD_GRE_TYPE << 1) - 1) + +/*************************** MINENCAP fields *******************************/ +#define NH_FLD_MINENCAP_SRC_IP (1) +#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) +#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) +#define NH_FLD_MINENCAP_ALL_FIELDS \ + ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) + +/*************************** IPSEC AH fields *******************************/ +#define NH_FLD_IPSEC_AH_SPI (1) +#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) +#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) + +/*************************** IPSEC ESP fields ******************************/ +#define NH_FLD_IPSEC_ESP_SPI (1) +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) +#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) + +#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 + +/*************************** MPLS fields ***********************************/ +#define NH_FLD_MPLS_LABEL_STACK (1) +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ + ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) + +/*************************** MACSEC fields *********************************/ +#define NH_FLD_MACSEC_SECTAG (1) +#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) + +/*************************** GTP fields ************************************/ +#define NH_FLD_GTP_TEID (1) + +/* Protocol options */ + +/* Ethernet options */ +#define NH_OPT_ETH_BROADCAST 1 +#define NH_OPT_ETH_MULTICAST 2 +#define NH_OPT_ETH_UNICAST 3 +#define NH_OPT_ETH_BPDU 4 + +#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) +/* also applicable for broadcast */ + +/* VLAN options */ +#define NH_OPT_VLAN_CFI 1 + +/* IPV4 options */ +#define NH_OPT_IPV4_UNICAST 1 +#define NH_OPT_IPV4_MULTICAST 2 +#define NH_OPT_IPV4_BROADCAST 3 +#define NH_OPT_IPV4_OPTION 4 +#define NH_OPT_IPV4_FRAG 5 +#define NH_OPT_IPV4_INITIAL_FRAG 6 + +/* IPV6 options */ +#define NH_OPT_IPV6_UNICAST 1 +#define NH_OPT_IPV6_MULTICAST 2 +#define NH_OPT_IPV6_OPTION 3 +#define NH_OPT_IPV6_FRAG 4 +#define NH_OPT_IPV6_INITIAL_FRAG 5 + +/* General IP options (may be used for any version) */ +#define NH_OPT_IP_FRAG 1 +#define NH_OPT_IP_INITIAL_FRAG 2 +#define NH_OPT_IP_OPTION 3 + +/* Minencap options */ +#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 + +/* GRE 
options */ +#define NH_OPT_GRE_ROUTING_PRESENT 1 + +/* TCP options */ +#define NH_OPT_TCP_OPTIONS 1 +#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 +#define NH_OPT_TCP_CONTROL_LOW_BITS 3 + +/* CAPWAP options */ +#define NH_OPT_CAPWAP_DTLS 1 + +enum net_prot { + NET_PROT_NONE = 0, + NET_PROT_PAYLOAD, + NET_PROT_ETH, + NET_PROT_VLAN, + NET_PROT_IPV4, + NET_PROT_IPV6, + NET_PROT_IP, + NET_PROT_TCP, + NET_PROT_UDP, + NET_PROT_UDP_LITE, + NET_PROT_IPHC, + NET_PROT_SCTP, + NET_PROT_SCTP_CHUNK_DATA, + NET_PROT_PPPOE, + NET_PROT_PPP, + NET_PROT_PPPMUX, + NET_PROT_PPPMUX_SUBFRM, + NET_PROT_L2TPV2, + NET_PROT_L2TPV3_CTRL, + NET_PROT_L2TPV3_SESS, + NET_PROT_LLC, + NET_PROT_LLC_SNAP, + NET_PROT_NLPID, + NET_PROT_SNAP, + NET_PROT_MPLS, + NET_PROT_IPSEC_AH, + NET_PROT_IPSEC_ESP, + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ + NET_PROT_MACSEC, + NET_PROT_GRE, + NET_PROT_MINENCAP, + NET_PROT_DCCP, + NET_PROT_ICMP, + NET_PROT_IGMP, + NET_PROT_ARP, + NET_PROT_CAPWAP_DATA, + NET_PROT_CAPWAP_CTRL, + NET_PROT_RFC2684, + NET_PROT_ICMPV6, + NET_PROT_FCOE, + NET_PROT_FIP, + NET_PROT_ISCSI, + NET_PROT_GTP, + NET_PROT_USER_DEFINED_L2, + NET_PROT_USER_DEFINED_L3, + NET_PROT_USER_DEFINED_L4, + NET_PROT_USER_DEFINED_L5, + NET_PROT_USER_DEFINED_SHIM1, + NET_PROT_USER_DEFINED_SHIM2, + + NET_PROT_DUMMY_LAST +}; + +/* IEEE 802.1Q */ +#define NH_IEEE8021Q_ETYPE 0x8100 +#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ + ((((u32)((etype) & 0xFFFF)) << 16) | \ + (((u32)((pcp) & 0x07)) << 13) | \ + (((u32)((dei) & 0x01)) << 12) | \ + (((u32)((vlan_id) & 0xFFF)))) + +#endif /* __FSL_NET_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig @@ -0,0 +1,6 @@ +config FSL_DPAA2_ETHSW + tristate "DPAA2 Ethernet Switch" + depends on FSL_MC_BUS && FSL_DPAA2 + default y + ---help--- + Prototype driver for DPAA2 Ethernet Switch. --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile @@ -0,0 +1,10 @@ + +obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o + +dpaa2-ethsw-objs := switch.o dpsw.o + +all: + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules + +clean: + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h @@ -0,0 +1,851 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __FSL_DPSW_CMD_H +#define __FSL_DPSW_CMD_H + +/* DPSW Version */ +#define DPSW_VER_MAJOR 8 +#define DPSW_VER_MINOR 0 + +#define DPSW_CMD_BASE_VERSION 1 +#define DPSW_CMD_ID_OFFSET 4 + +#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800) +#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802) + +#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02) + +#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002) +#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003) +#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004) +#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005) +#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006) + +#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010) +#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011) +#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012) +#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013) +#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014) +#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015) +#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016) +#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017) + +#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022) + +#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024) + +#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026) + +#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030) +#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031) +#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032) +#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033) +#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034) +#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035) +#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036) +#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037) +#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038) +#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039) +#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A) +#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B) + +#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D) +#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E) + +#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042) + +#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044) +#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x045) +#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046) +#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047) +#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048) +#define DPSW_CMDID_IF_SET_MULTICAST DPSW_CMD_ID(0x049) +#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A) + +#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C) + +#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060) +#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061) +#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062) +#define DPSW_CMDID_VLAN_ADD_IF_FLOODING DPSW_CMD_ID(0x063) +#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064) +#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065) +#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066) +#define DPSW_CMDID_VLAN_REMOVE 
DPSW_CMD_ID(0x067) +#define DPSW_CMDID_VLAN_GET_IF DPSW_CMD_ID(0x068) +#define DPSW_CMDID_VLAN_GET_IF_FLOODING DPSW_CMD_ID(0x069) +#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED DPSW_CMD_ID(0x06A) +#define DPSW_CMDID_VLAN_GET_ATTRIBUTES DPSW_CMD_ID(0x06B) + +#define DPSW_CMDID_FDB_GET_MULTICAST DPSW_CMD_ID(0x080) +#define DPSW_CMDID_FDB_GET_UNICAST DPSW_CMD_ID(0x081) +#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082) +#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083) +#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084) +#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085) +#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086) +#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087) +#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088) +#define DPSW_CMDID_FDB_GET_ATTR DPSW_CMD_ID(0x089) + +#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090) +#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091) +#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092) +#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093) +#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094) +#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095) +#define DPSW_CMDID_ACL_GET_ATTR DPSW_CMD_ID(0x096) + +#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0) +#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1) +#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2) +#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPSW_MASK(field) \ + GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \ + DPSW_##field##_SHIFT) +#define dpsw_set_field(var, field, val) \ + ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field))) +#define dpsw_get_field(var, field) \ + (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT) +#define dpsw_get_bit(var, bit) \ + (((var) >> (bit)) & GENMASK(0, 0)) + +static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val) +{ + var |= (u64)val << bit & GENMASK(bit, bit); + return var; +} + +struct dpsw_cmd_open { + __le32 dpsw_id; +}; + +#define DPSW_COMPONENT_TYPE_SHIFT 0 +#define DPSW_COMPONENT_TYPE_SIZE 4 + +struct dpsw_cmd_create { + /* cmd word 0 */ + __le16 num_ifs; + u8 max_fdbs; + u8 max_meters_per_if; + /* from LSB: only the first 4 bits */ + u8 component_type; + u8 pad[3]; + /* cmd word 1 */ + __le16 max_vlans; + __le16 max_fdb_entries; + __le16 fdb_aging_time; + __le16 max_fdb_mc_groups; + /* cmd word 2 */ + __le64 options; +}; + +struct dpsw_cmd_destroy { + __le32 dpsw_id; +}; + +#define DPSW_ENABLE_SHIFT 0 +#define DPSW_ENABLE_SIZE 1 + +struct dpsw_rsp_is_enabled { + /* from LSB: enable:1 */ + u8 enabled; +}; + +struct dpsw_cmd_set_irq { + /* cmd word 0 */ + u8 irq_index; + u8 pad[3]; + __le32 irq_val; + /* cmd word 1 */ + __le64 irq_addr; + /* cmd word 2 */ + __le32 irq_num; +}; + +struct dpsw_cmd_get_irq { + __le32 pad; + u8 irq_index; +}; + +struct dpsw_rsp_get_irq { + /* cmd word 0 */ + __le32 irq_val; + __le32 pad; + /* cmd word 1 */ + __le64 irq_addr; + /* cmd word 2 */ + __le32 irq_num; + __le32 irq_type; +}; + +struct dpsw_cmd_set_irq_enable { + u8 enable_state; + u8 pad[3]; + u8 irq_index; +}; + +struct dpsw_cmd_get_irq_enable { + __le32 pad; + u8 irq_index; +}; + +struct dpsw_rsp_get_irq_enable { + u8 enable_state; +}; + +struct dpsw_cmd_set_irq_mask { + __le32 mask; + u8 irq_index; +}; + +struct dpsw_cmd_get_irq_mask { + __le32 pad; + u8 irq_index; +}; + +struct dpsw_rsp_get_irq_mask { + __le32 mask; +}; + +struct dpsw_cmd_get_irq_status { + __le32 status; + u8 irq_index; +}; + +struct dpsw_rsp_get_irq_status { + 
__le32 status; +}; + +struct dpsw_cmd_clear_irq_status { + __le32 status; + u8 irq_index; +}; + +#define DPSW_COMPONENT_TYPE_SHIFT 0 +#define DPSW_COMPONENT_TYPE_SIZE 4 + +struct dpsw_rsp_get_attr { + /* cmd word 0 */ + __le16 num_ifs; + u8 max_fdbs; + u8 num_fdbs; + __le16 max_vlans; + __le16 num_vlans; + /* cmd word 1 */ + __le16 max_fdb_entries; + __le16 fdb_aging_time; + __le32 dpsw_id; + /* cmd word 2 */ + __le16 mem_size; + __le16 max_fdb_mc_groups; + u8 max_meters_per_if; + /* from LSB: only the first 4 bits */ + u8 component_type; + __le16 pad; + /* cmd word 3 */ + __le64 options; +}; + +struct dpsw_cmd_set_reflection_if { + __le16 if_id; +}; + +struct dpsw_cmd_if_set_flooding { + __le16 if_id; + /* from LSB: enable:1 */ + u8 enable; +}; + +struct dpsw_cmd_if_set_broadcast { + __le16 if_id; + /* from LSB: enable:1 */ + u8 enable; +}; + +struct dpsw_cmd_if_set_multicast { + __le16 if_id; + /* from LSB: enable:1 */ + u8 enable; +}; + +#define DPSW_VLAN_ID_SHIFT 0 +#define DPSW_VLAN_ID_SIZE 12 +#define DPSW_DEI_SHIFT 12 +#define DPSW_DEI_SIZE 1 +#define DPSW_PCP_SHIFT 13 +#define DPSW_PCP_SIZE 3 + +struct dpsw_cmd_if_set_tci { + __le16 if_id; + /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */ + __le16 conf; +}; + +struct dpsw_cmd_if_get_tci { + __le16 if_id; +}; + +struct dpsw_rsp_if_get_tci { + __le16 pad; + __le16 vlan_id; + u8 dei; + u8 pcp; +}; + +#define DPSW_STATE_SHIFT 0 +#define DPSW_STATE_SIZE 4 + +struct dpsw_cmd_if_set_stp { + __le16 if_id; + __le16 vlan_id; + /* only the first LSB 4 bits */ + u8 state; +}; + +#define DPSW_FRAME_TYPE_SHIFT 0 +#define DPSW_FRAME_TYPE_SIZE 4 +#define DPSW_UNACCEPTED_ACT_SHIFT 4 +#define DPSW_UNACCEPTED_ACT_SIZE 4 + +struct dpsw_cmd_if_set_accepted_frames { + __le16 if_id; + /* from LSB: type:4 unaccepted_act:4 */ + u8 unaccepted; +}; + +#define DPSW_ACCEPT_ALL_SHIFT 0 +#define DPSW_ACCEPT_ALL_SIZE 1 + +struct dpsw_cmd_if_set_accept_all_vlan { + __le16 if_id; + /* only the least significant bit */ + u8 accept_all; +}; + +#define DPSW_COUNTER_TYPE_SHIFT 0 +#define DPSW_COUNTER_TYPE_SIZE 5 + +struct dpsw_cmd_if_get_counter { + __le16 if_id; + /* from LSB: type:5 */ + u8 type; +}; + +struct dpsw_rsp_if_get_counter { + __le64 pad; + __le64 counter; +}; + +struct dpsw_cmd_if_set_counter { + /* cmd word 0 */ + __le16 if_id; + /* from LSB: type:5 */ + u8 type; + /* cmd word 1 */ + __le64 counter; +}; + +#define DPSW_PRIORITY_SELECTOR_SHIFT 0 +#define DPSW_PRIORITY_SELECTOR_SIZE 3 +#define DPSW_SCHED_MODE_SHIFT 0 +#define DPSW_SCHED_MODE_SIZE 4 + +struct dpsw_cmd_if_set_tx_selection { + __le16 if_id; + /* from LSB: priority_selector:3 */ + u8 priority_selector; + u8 pad[5]; + u8 tc_id[8]; + + struct dpsw_tc_sched { + __le16 delta_bandwidth; + u8 mode; + u8 pad; + } tc_sched[8]; +}; + +#define DPSW_FILTER_SHIFT 0 +#define DPSW_FILTER_SIZE 2 + +struct dpsw_cmd_if_reflection { + __le16 if_id; + __le16 vlan_id; + /* only 2 bits from the LSB */ + u8 filter; +}; + +#define DPSW_MODE_SHIFT 0 +#define DPSW_MODE_SIZE 4 +#define DPSW_UNITS_SHIFT 4 +#define DPSW_UNITS_SIZE 4 + +struct dpsw_cmd_if_set_flooding_metering { + /* cmd word 0 */ + __le16 if_id; + u8 pad; + /* from LSB: mode:4 units:4 */ + u8 mode_units; + __le32 cir; + /* cmd word 1 */ + __le32 eir; + __le32 cbs; + /* cmd word 2 */ + __le32 ebs; +}; + +struct dpsw_cmd_if_set_metering { + /* cmd word 0 */ + __le16 if_id; + u8 tc_id; + /* from LSB: mode:4 units:4 */ + u8 mode_units; + __le32 cir; + /* cmd word 1 */ + __le32 eir; + __le32 cbs; + /* cmd word 2 */ + __le32 ebs; +}; + +#define 
DPSW_EARLY_DROP_MODE_SHIFT 0 +#define DPSW_EARLY_DROP_MODE_SIZE 2 +#define DPSW_EARLY_DROP_UNIT_SHIFT 2 +#define DPSW_EARLY_DROP_UNIT_SIZE 2 + +struct dpsw_prep_early_drop { + /* from LSB: mode:2 units:2 */ + u8 conf; + u8 pad0[3]; + __le32 tail_drop_threshold; + u8 green_drop_probability; + u8 pad1[7]; + __le64 green_max_threshold; + __le64 green_min_threshold; + __le64 pad2; + u8 yellow_drop_probability; + u8 pad3[7]; + __le64 yellow_max_threshold; + __le64 yellow_min_threshold; +}; + +struct dpsw_cmd_if_set_early_drop { + /* cmd word 0 */ + u8 pad0; + u8 tc_id; + __le16 if_id; + __le32 pad1; + /* cmd word 1 */ + __le64 early_drop_iova; +}; + +struct dpsw_cmd_custom_tpid { + __le16 pad; + __le16 tpid; +}; + +struct dpsw_cmd_if { + __le16 if_id; +}; + +#define DPSW_ADMIT_UNTAGGED_SHIFT 0 +#define DPSW_ADMIT_UNTAGGED_SIZE 4 +#define DPSW_ENABLED_SHIFT 5 +#define DPSW_ENABLED_SIZE 1 +#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6 +#define DPSW_ACCEPT_ALL_VLAN_SIZE 1 + +struct dpsw_rsp_if_get_attr { + /* cmd word 0 */ + /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */ + u8 conf; + u8 pad1; + u8 num_tcs; + u8 pad2; + __le16 qdid; + /* cmd word 1 */ + __le32 options; + __le32 pad3; + /* cmd word 2 */ + __le32 rate; +}; + +struct dpsw_cmd_if_set_max_frame_length { + __le16 if_id; + __le16 frame_length; +}; + +struct dpsw_cmd_if_get_max_frame_length { + __le16 if_id; +}; + +struct dpsw_rsp_if_get_max_frame_length { + __le16 pad; + __le16 frame_length; +}; + +struct dpsw_cmd_if_set_link_cfg { + /* cmd word 0 */ + __le16 if_id; + u8 pad[6]; + /* cmd word 1 */ + __le32 rate; + __le32 pad1; + /* cmd word 2 */ + __le64 options; +}; + +struct dpsw_cmd_if_get_link_state { + __le16 if_id; +}; + +#define DPSW_UP_SHIFT 0 +#define DPSW_UP_SIZE 1 + +struct dpsw_rsp_if_get_link_state { + /* cmd word 0 */ + __le32 pad0; + u8 up; + u8 pad1[3]; + /* cmd word 1 */ + __le32 rate; + __le32 pad2; + /* cmd word 2 */ + __le64 options; +}; + +struct dpsw_vlan_add { + __le16 fdb_id; + __le16 vlan_id; +}; + +struct dpsw_cmd_vlan_manage_if { + /* cmd word 0 */ + __le16 pad0; + __le16 vlan_id; + __le32 pad1; + /* cmd word 1 */ + __le64 if_id[4]; +}; + +struct dpsw_cmd_vlan_remove { + __le16 pad; + __le16 vlan_id; +}; + +struct dpsw_cmd_vlan_get_attr { + __le16 vlan_id; +}; + +struct dpsw_rsp_vlan_get_attr { + /* cmd word 0 */ + __le64 pad; + /* cmd word 1 */ + __le16 fdb_id; + __le16 num_ifs; + __le16 num_untagged_ifs; + __le16 num_flooding_ifs; +}; + +struct dpsw_cmd_vlan_get_if { + __le16 vlan_id; +}; + +struct dpsw_rsp_vlan_get_if { + /* cmd word 0 */ + __le16 pad0; + __le16 num_ifs; + u8 pad1[4]; + /* cmd word 1 */ + __le64 if_id[4]; +}; + +struct dpsw_cmd_vlan_get_if_untagged { + __le16 vlan_id; +}; + +struct dpsw_rsp_vlan_get_if_untagged { + /* cmd word 0 */ + __le16 pad0; + __le16 num_ifs; + u8 pad1[4]; + /* cmd word 1 */ + __le64 if_id[4]; +}; + +struct dpsw_cmd_vlan_get_if_flooding { + __le16 vlan_id; +}; + +struct dpsw_rsp_vlan_get_if_flooding { + /* cmd word 0 */ + __le16 pad0; + __le16 num_ifs; + u8 pad1[4]; + /* cmd word 1 */ + __le64 if_id[4]; +}; + +struct dpsw_cmd_fdb_add { + __le32 pad; + __le16 fdb_aging_time; + __le16 num_fdb_entries; +}; + +struct dpsw_rsp_fdb_add { + __le16 fdb_id; +}; + +struct dpsw_cmd_fdb_remove { + __le16 fdb_id; +}; + +#define DPSW_ENTRY_TYPE_SHIFT 0 +#define DPSW_ENTRY_TYPE_SIZE 4 + +struct dpsw_cmd_fdb_add_unicast { + /* cmd word 0 */ + __le16 fdb_id; + u8 mac_addr[6]; + /* cmd word 1 */ + u8 if_egress; + u8 pad; + /* only the first 4 bits from LSB */ + u8 type; +}; + +struct 
dpsw_cmd_fdb_get_unicast { + __le16 fdb_id; + u8 mac_addr[6]; +}; + +struct dpsw_rsp_fdb_get_unicast { + __le64 pad; + __le16 if_egress; + /* only first 4 bits from LSB */ + u8 type; +}; + +struct dpsw_cmd_fdb_remove_unicast { + /* cmd word 0 */ + __le16 fdb_id; + u8 mac_addr[6]; + /* cmd word 1 */ + __le16 if_egress; + /* only the first 4 bits from LSB */ + u8 type; +}; + +struct dpsw_cmd_fdb_add_multicast { + /* cmd word 0 */ + __le16 fdb_id; + __le16 num_ifs; + /* only the first 4 bits from LSB */ + u8 type; + u8 pad[3]; + /* cmd word 1 */ + u8 mac_addr[6]; + __le16 pad2; + /* cmd word 2 */ + __le64 if_id[4]; +}; + +struct dpsw_cmd_fdb_get_multicast { + __le16 fdb_id; + u8 mac_addr[6]; +}; + +struct dpsw_rsp_fdb_get_multicast { + /* cmd word 0 */ + __le64 pad0; + /* cmd word 1 */ + __le16 num_ifs; + /* only the first 4 bits from LSB */ + u8 type; + u8 pad1[5]; + /* cmd word 2 */ + __le64 if_id[4]; +}; + +struct dpsw_cmd_fdb_remove_multicast { + /* cmd word 0 */ + __le16 fdb_id; + __le16 num_ifs; + /* only the first 4 bits from LSB */ + u8 type; + u8 pad[3]; + /* cmd word 1 */ + u8 mac_addr[6]; + __le16 pad2; + /* cmd word 2 */ + __le64 if_id[4]; +}; + +#define DPSW_LEARNING_MODE_SHIFT 0 +#define DPSW_LEARNING_MODE_SIZE 4 + +struct dpsw_cmd_fdb_set_learning_mode { + __le16 fdb_id; + /* only the first 4 bits from LSB */ + u8 mode; +}; + +struct dpsw_cmd_fdb_get_attr { + __le16 fdb_id; +}; + +struct dpsw_rsp_fdb_get_attr { + /* cmd word 0 */ + __le16 pad; + __le16 max_fdb_entries; + __le16 fdb_aging_time; + __le16 num_fdb_mc_groups; + /* cmd word 1 */ + __le16 max_fdb_mc_groups; + /* only the first 4 bits from LSB */ + u8 learning_mode; +}; + +struct dpsw_cmd_acl_add { + __le16 pad; + __le16 max_entries; +}; + +struct dpsw_rsp_acl_add { + __le16 acl_id; +}; + +struct dpsw_cmd_acl_remove { + __le16 acl_id; +}; + +struct dpsw_prep_acl_entry { + u8 match_l2_dest_mac[6]; + __le16 match_l2_tpid; + + u8 match_l2_source_mac[6]; + __le16 match_l2_vlan_id; + + __le32 match_l3_dest_ip; + __le32 match_l3_source_ip; + + __le16 match_l4_dest_port; + __le16 match_l4_source_port; + __le16 match_l2_ether_type; + u8 match_l2_pcp_dei; + u8 match_l3_dscp; + + u8 mask_l2_dest_mac[6]; + __le16 mask_l2_tpid; + + u8 mask_l2_source_mac[6]; + __le16 mask_l2_vlan_id; + + __le32 mask_l3_dest_ip; + __le32 mask_l3_source_ip; + + __le16 mask_l4_dest_port; + __le16 mask_l4_source_port; + __le16 mask_l2_ether_type; + u8 mask_l2_pcp_dei; + u8 mask_l3_dscp; + + u8 match_l3_protocol; + u8 mask_l3_protocol; +}; + +#define DPSW_RESULT_ACTION_SHIFT 0 +#define DPSW_RESULT_ACTION_SIZE 4 + +struct dpsw_cmd_acl_entry { + __le16 acl_id; + __le16 result_if_id; + __le32 precedence; + /* from LSB only the first 4 bits */ + u8 result_action; + u8 pad[7]; + __le64 pad2[4]; + __le64 key_iova; +}; + +struct dpsw_cmd_acl_if { + /* cmd word 0 */ + __le16 acl_id; + __le16 num_ifs; + __le32 pad; + /* cmd word 1 */ + __le64 if_id[4]; +}; + +struct dpsw_cmd_acl_get_attr { + __le16 acl_id; +}; + +struct dpsw_rsp_acl_get_attr { + /* cmd word 0 */ + __le64 pad; + /* cmd word 1 */ + __le16 max_entries; + __le16 num_entries; + __le16 num_ifs; +}; + +struct dpsw_rsp_ctrl_if_get_attr { + /* cmd word 0 */ + __le64 pad; + /* cmd word 1 */ + __le32 rx_fqid; + __le32 rx_err_fqid; + /* cmd word 2 */ + __le32 tx_err_conf_fqid; +}; + +struct dpsw_cmd_ctrl_if_set_pools { + u8 num_dpbp; + /* from LSB: POOL0_BACKUP_POOL:1 ... 
POOL7_BACKUP_POOL */ + u8 backup_pool; + __le16 pad; + __le32 dpbp_id[8]; + __le16 buffer_size[8]; +}; + +struct dpsw_rsp_get_api_version { + __le16 version_major; + __le16 version_minor; +}; + +#endif /* __FSL_DPSW_CMD_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c @@ -0,0 +1,2762 @@ +/* Copyright 2013-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include "../../fsl-mc/include/mc-sys.h" +#include "../../fsl-mc/include/mc-cmd.h" +#include "dpsw.h" +#include "dpsw-cmd.h" + +static void build_if_id_bitmap(__le64 *bmap, + const u16 *id, + const u16 num_ifs) +{ + int i; + + for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) + bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64], + (id[i] % 64), + 1); +} + +static void read_if_id_bitmap(u16 *if_id, + u16 *num_ifs, + __le64 *bmap) +{ + int bitmap[DPSW_MAX_IF] = { 0 }; + int i, j = 0; + int count = 0; + + for (i = 0; i < DPSW_MAX_IF; i++) { + bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]), + i % 64); + count += bitmap[i]; + } + + *num_ifs = (u16)count; + + for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) { + if (bitmap[i]) { + if_id[j] = (u16)i; + j++; + } + } +} + +/** + * dpsw_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpsw_id: DPSW unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpsw_create() function. 
+ * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpsw_id, + u16 *token) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_open *cmd_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpsw_cmd_open *)cmd.params; + cmd_params->dpsw_id = cpu_to_le32(dpsw_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpsw_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_enable() - Enable DPSW functionality + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_disable() - Disable DPSW functionality + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_is_enabled() - Check if the DPSW is enabled + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise + */ +int dpsw_is_enabled(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpsw_rsp_is_enabled *cmd_rsp; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params; + *en = dpsw_get_field(cmd_rsp->enabled, ENABLE); + + return 0; +} + +/** + * dpsw_reset() - Reset the DPSW, returns the object to initial state. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @irq_index: Identifies the interrupt index to configure + * @irq_cfg: IRQ configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_set_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + struct dpsw_irq_cfg *irq_cfg) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_set_irq *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_set_irq *)cmd.params; + cmd_params->irq_index = irq_index; + cmd_params->irq_val = cpu_to_le32(irq_cfg->val); + cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr); + cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_get_irq() - Get IRQ information from the DPSW + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @irq_index: The interrupt index to configure + * @type: Interrupt type: 0 represents message interrupt + * type (both irq_addr and irq_val are valid) + * @irq_cfg: IRQ attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_get_irq(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + int *type, + struct dpsw_irq_cfg *irq_cfg) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_get_irq *cmd_params; + struct dpsw_rsp_get_irq *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_get_irq *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpsw_rsp_get_irq *)cmd.params; + irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr); + irq_cfg->val = le32_to_cpu(rsp_params->irq_val); + irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num); + *type = le32_to_cpu(rsp_params->irq_type); + + return 0; +} + +/** + * dpsw_set_irq_enable() - Set overall interrupt state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @irq_index: The interrupt index to configure + * @en: Interrupt state - enable = 1, disable = 0 + * + * Allows GPP software to control when interrupts are generated. + * Each interrupt can have up to 32 causes. The enable/disable controls the + * overall interrupt state. If the interrupt is disabled, no causes will cause + * an interrupt. + * + * Return: '0' on Success; Error code otherwise. 
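+ *
+ * A minimal setup sketch (illustrative only, not part of the API
+ * contract; DPSW_IRQ_INDEX_IF and DPSW_IRQ_EVENT_LINK_CHANGED are
+ * assumed to come from dpsw.h, 'token' from a prior dpsw_open()):
+ *
+ * dpsw_set_irq_mask(mc_io, 0, token, DPSW_IRQ_INDEX_IF,
+ * DPSW_IRQ_EVENT_LINK_CHANGED);
+ * dpsw_set_irq_enable(mc_io, 0, token, DPSW_IRQ_INDEX_IF, 1);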
+ */ +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_set_irq_enable *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params; + dpsw_set_field(cmd_params->enable_state, ENABLE, en); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @irq_index: The interrupt index to configure + * @mask: Event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_set_irq_mask *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_get_irq_status() - Get the current status of any pending interrupts + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_get_irq_status *cmd_params; + struct dpsw_rsp_get_irq_status *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(*status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params; + *status = le32_to_cpu(rsp_params->status); + + return 0; +} + +/** + * dpsw_clear_irq_status() - Clear a pending interrupt's status + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @irq_index: The interrupt index to configure + * @status: bits to clear (W1C) - one bit per cause: + * 0 = don't change + * 1 = clear status bit + * + * Return: '0' on Success; Error code otherwise. 
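+ *
+ * A typical read-then-acknowledge sequence is sketched below (zeroing
+ * 'status' first matters because dpsw_get_irq_status() also sends the
+ * input value to the MC; interrupt index 0 and an open 'token' are
+ * assumed):
+ *
+ * u32 status = 0;
+ *
+ * err = dpsw_get_irq_status(mc_io, 0, token, 0, &status);
+ * if (!err)
+ * err = dpsw_clear_irq_status(mc_io, 0, token, 0, status);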
+ */ +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_clear_irq_status *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_get_attributes() - Retrieve DPSW attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @attr: Returned DPSW attributes + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpsw_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpsw_rsp_get_attr *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpsw_rsp_get_attr *)cmd.params; + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs); + attr->max_fdbs = rsp_params->max_fdbs; + attr->num_fdbs = rsp_params->num_fdbs; + attr->max_vlans = le16_to_cpu(rsp_params->max_vlans); + attr->num_vlans = le16_to_cpu(rsp_params->num_vlans); + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries); + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time); + attr->id = le32_to_cpu(rsp_params->dpsw_id); + attr->mem_size = le16_to_cpu(rsp_params->mem_size); + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups); + attr->max_meters_per_if = rsp_params->max_meters_per_if; + attr->options = le64_to_cpu(rsp_params->options); + attr->component_type = dpsw_get_field(rsp_params->component_type, + COMPONENT_TYPE); + + return 0; +} + +/** + * dpsw_set_reflection_if() - Set target interface for reflected interfaces. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Id + * + * Only one reflection receive interface is allowed per switch + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_set_reflection_if *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_set_link_cfg() - Set the link configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface id + * @cfg: Link configuration + * + * Return: '0' on Success; Error code otherwise. 
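+ *
+ * Minimal usage sketch (the rate value is illustrative only and
+ * DPSW_LINK_OPT_AUTONEG is assumed to be defined in dpsw.h):
+ *
+ * struct dpsw_link_cfg cfg = {
+ * .rate = 1000,
+ * .options = DPSW_LINK_OPT_AUTONEG,
+ * };
+ *
+ * err = dpsw_if_set_link_cfg(mc_io, 0, token, if_id, &cfg);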
+ */ +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpsw_link_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_set_link_cfg *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->rate = cpu_to_le32(cfg->rate); + cmd_params->options = cpu_to_le64(cfg->options); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_get_link_state() - Return the link state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface id + * @state: Link state: 1 - link up, 0 - link down or disconnected + * + * Return: '0' on Success; Error code otherwise. + */ +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpsw_link_state *state) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_get_link_state *cmd_params; + struct dpsw_rsp_if_get_link_state *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params; + state->rate = le32_to_cpu(rsp_params->rate); + state->options = le64_to_cpu(rsp_params->options); + state->up = dpsw_get_field(rsp_params->up, UP); + + return 0; +} + +/** + * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @en: 1 - enable, 0 - disable + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_set_flooding *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_set_broadcast() - Enable/disable broadcast for a particular interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @en: 1 - enable, 0 - disable + * + * Return: Completion status. '0' on Success; Error code otherwise. 
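+ *
+ * For example, a switch port is commonly brought up with flooding and
+ * broadcast both enabled (a sketch, assuming an open 'token' and a
+ * valid 'if_id'):
+ *
+ * err = dpsw_if_set_flooding(mc_io, 0, token, if_id, 1);
+ * if (!err)
+ * err = dpsw_if_set_broadcast(mc_io, 0, token, if_id, 1);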
+ */ +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_set_broadcast *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_set_multicast() - Enable/disable multicast for particular interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @en: 1 - enable, 0 - disable + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_set_multicast *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Tag Control Information Configuration + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_set_tci(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_tci_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_set_tci *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id); + dpsw_set_field(cmd_params->conf, DEI, cfg->dei); + dpsw_set_field(cmd_params->conf, PCP, cfg->pcp); + cmd_params->conf = cpu_to_le16(cmd_params->conf); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Tag Control Information Configuration + * + * Return: Completion status. '0' on Success; Error code otherwise. 
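+ *
+ * A read-modify-write of the default TCI can be sketched as follows
+ * (the PCP value is illustrative only):
+ *
+ * struct dpsw_tci_cfg tci_cfg;
+ *
+ * err = dpsw_if_get_tci(mc_io, 0, token, if_id, &tci_cfg);
+ * if (!err) {
+ * tci_cfg.pcp = 3;
+ * err = dpsw_if_set_tci(mc_io, 0, token, if_id, &tci_cfg);
+ * }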
+ */ +int dpsw_if_get_tci(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpsw_tci_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_get_tci *cmd_params; + struct dpsw_rsp_if_get_tci *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params; + cfg->pcp = rsp_params->pcp; + cfg->dei = rsp_params->dei; + cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id); + + return 0; +} + +/** + * dpsw_if_set_stp() - Set Spanning Tree Protocol (STP) state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: STP State configuration parameters + * + * The following STP states are supported - + * blocking, listening, learning, forwarding and disabled. + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_set_stp(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_stp_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_set_stp *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id); + dpsw_set_field(cmd_params->state, STATE, cfg->state); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_set_accepted_frames() - Set frame types accepted by an interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Frame types configuration + * + * When admit_only_vlan_tagged - the device will discard untagged + * frames or priority-tagged frames received on this interface. + * When admit_only_untagged - untagged frames or priority-tagged + * frames received on this interface will be accepted and assigned + * to a VID based on the PVID and VID Set for this interface. + * When admit_all - the device will accept VLAN-tagged, untagged + * and priority-tagged frames. + * The default is admit_all. + * + * Return: Completion status. '0' on Success; Error code otherwise. 
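+ *
+ * Restricting a port to VLAN-tagged traffic might be sketched as
+ * below (the DPSW_ADMIT_* and DPSW_UNACCEPTED_* enum values are
+ * assumed to come from dpsw.h):
+ *
+ * struct dpsw_accepted_frames_cfg cfg = {
+ * .type = DPSW_ADMIT_ONLY_VLAN_TAGGED,
+ * .unaccept_act = DPSW_UNACCEPTED_DISCARD,
+ * };
+ *
+ * err = dpsw_if_set_accepted_frames(mc_io, 0, token, if_id, &cfg);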
+ */ +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_accepted_frames_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_set_accepted_frames *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type); + dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT, + cfg->unaccept_act); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_set_accept_all_vlan() - Enable/disable accepting frames from all VLANs + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @accept_all: Accept or drop frames having different VLAN + * + * When accept_all is FALSE, the device will discard incoming + * frames belonging to VLANs that do not include this interface in + * their member set. When accept_all is TRUE, the interface will + * accept all incoming frames. + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + int accept_all) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_set_accept_all_vlan *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_get_counter() - Get a specific counter of a particular interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @type: Counter type + * @counter: Returned counter value + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_get_counter(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + enum dpsw_counter type, + u64 *counter) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_get_counter *cmd_params; + struct dpsw_rsp_if_get_counter *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params; + *counter = le64_to_cpu(rsp_params->counter); + + return 0; +} + +/** + * dpsw_if_set_counter() - Set a specific counter of a particular interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @type: Counter type + * @counter: New counter value + * + * Return: Completion status. '0' on Success; Error code otherwise. 
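+ *
+ * Counters are usually cleared by writing zero and read back later
+ * (a sketch; DPSW_CNT_ING_FRAME is assumed to come from dpsw.h):
+ *
+ * u64 frames;
+ *
+ * err = dpsw_if_set_counter(mc_io, 0, token, if_id,
+ * DPSW_CNT_ING_FRAME, 0);
+ * ...
+ * err = dpsw_if_get_counter(mc_io, 0, token, if_id,
+ * DPSW_CNT_ING_FRAME, &frames);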
+ */ +int dpsw_if_set_counter(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + enum dpsw_counter type, + u64 counter) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_set_counter *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->counter = cpu_to_le64(counter); + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_set_tx_selection() - Map frame fields to traffic classes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Traffic class mapping configuration + * + * The function is used for mapping a variety of frame fields (DSCP, PCP) + * to a traffic class. A traffic class is a number + * in the range from 0 to 7 + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_tx_selection_cfg *cfg) +{ + struct dpsw_cmd_if_set_tx_selection *cmd_params; + struct mc_command cmd = { 0 }; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR, + cfg->priority_selector); + + for (i = 0; i < 8; i++) { + cmd_params->tc_sched[i].delta_bandwidth = + cpu_to_le16(cfg->tc_sched[i].delta_bandwidth); + dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE, + cfg->tc_sched[i].mode); + cmd_params->tc_id[i] = cfg->tc_id[i]; + } + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Reflection configuration + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_reflection_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpsw_cmd_if_reflection *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id); + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Reflection configuration + * + * Return: Completion status. '0' on Success; Error code otherwise. 
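+ *
+ * Removal mirrors the add call with the same configuration (a
+ * sketch; the VLAN value is illustrative and the filter enum value
+ * is assumed to come from dpsw.h):
+ *
+ * struct dpsw_reflection_cfg cfg = {
+ * .vlan_id = 100,
+ * .filter = DPSW_REFLECTION_FILTER_INGRESS_ALL,
+ * };
+ *
+ * err = dpsw_if_add_reflection(mc_io, 0, token, if_id, &cfg);
+ * ...
+ * err = dpsw_if_remove_reflection(mc_io, 0, token, if_id, &cfg);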
+ */
+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 if_id,
+			      const struct dpsw_reflection_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_if_reflection *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+	dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_set_flooding_metering() - Set flooding metering
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Metering parameters
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
+				  u32 cmd_flags,
+				  u16 token,
+				  u16 if_id,
+				  const struct dpsw_metering_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_if_set_flooding_metering *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
+	dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
+	cmd_params->cir = cpu_to_le32(cfg->cir);
+	cmd_params->eir = cpu_to_le32(cfg->eir);
+	cmd_params->cbs = cpu_to_le32(cfg->cbs);
+	cmd_params->ebs = cpu_to_le32(cfg->ebs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_set_metering() - Set interface metering for flooding
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @tc_id: Traffic class ID
+ * @cfg: Metering parameters
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
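+ *
+ * Illustrative usage (a sketch, not part of the original API notes; the
+ * rate and burst values below are arbitrary examples, and the 'mode'
+ * and 'units' fields are left at their zero defaults):
+ *
+ *	struct dpsw_metering_cfg cfg = {
+ *		.cir = 10000, .eir = 1000, .cbs = 64, .ebs = 64,
+ *	};
+ *
+ *	err = dpsw_if_set_metering(mc_io, 0, token, if_id, tc_id, &cfg);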
+ */
+int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u16 if_id,
+			 u8 tc_id,
+			 const struct dpsw_metering_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_if_set_metering *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	cmd_params->tc_id = tc_id;
+	dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
+	dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
+	cmd_params->cir = cpu_to_le32(cfg->cir);
+	cmd_params->eir = cpu_to_le32(cfg->eir);
+	cmd_params->cbs = cpu_to_le32(cfg->cbs);
+	cmd_params->ebs = cpu_to_le32(cfg->ebs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_prepare_early_drop() - Prepare an early-drop configuration buffer
+ * @cfg: Early-drop configuration
+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
+ *
+ * This function has to be called before dpsw_if_set_early_drop()
+ */
+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
+			     u8 *early_drop_buf)
+{
+	struct dpsw_prep_early_drop *ext_params;
+
+	ext_params = (struct dpsw_prep_early_drop *)early_drop_buf;
+	dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode);
+	dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units);
+	ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold);
+	ext_params->green_drop_probability = cfg->green.drop_probability;
+	ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
+	ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
+	ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
+	ext_params->yellow_max_threshold =
+		cpu_to_le64(cfg->yellow.max_threshold);
+	ext_params->yellow_min_threshold =
+		cpu_to_le64(cfg->yellow.min_threshold);
+}
+
+/**
+ * dpsw_if_set_early_drop() - Set interface traffic class early-drop
+ *				configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @tc_id: Traffic class selection (0-7)
+ * @early_drop_iova: I/O virtual address of the 256-byte buffer filled by
+ *	dpsw_prepare_early_drop(); must be cacheline-aligned and
+ *	DMA-able memory
+ *
+ * warning: Before calling this function, call dpsw_prepare_early_drop()
+ * to prepare the early_drop_iova parameter
+ *
+ * Return: '0' on Success; error code otherwise.
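+ *
+ * Illustrative flow (a sketch, not part of the original API notes;
+ * 'dev' is assumed to be the device used for the DMA mapping and
+ * 'ed_cfg' a filled-in struct dpsw_early_drop_cfg):
+ *
+ *	u8 *buf = kzalloc(256, GFP_KERNEL);
+ *	dma_addr_t iova;
+ *
+ *	dpsw_prepare_early_drop(&ed_cfg, buf);
+ *	iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
+ *	err = dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id, iova);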
+ */
+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u16 if_id,
+			   u8 tc_id,
+			   u64 early_drop_iova)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_if_set_early_drop *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params;
+	cmd_params->tc_id = tc_id;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_add_custom_tpid() - Configure a distinct Ethernet type value
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Tag Protocol identifier
+ *
+ * Configures a distinct Ethernet type value (or TPID value)
+ * to indicate a VLAN tag in addition to the common
+ * TPID values 0x8100 and 0x88A8.
+ * Two additional TPIDs are supported.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 const struct dpsw_custom_tpid_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_custom_tpid *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
+	cmd_params->tpid = cpu_to_le16(cfg->tpid);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_remove_custom_tpid() - Remove a distinct Ethernet type value
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Tag Protocol identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
+			    u32 cmd_flags,
+			    u16 token,
+			    const struct dpsw_custom_tpid_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_custom_tpid *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
+	cmd_params->tpid = cpu_to_le16(cfg->tpid);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_enable() - Enable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_enable(struct fsl_mc_io *mc_io,
+		   u32 cmd_flags,
+		   u16 token,
+		   u16 if_id)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_if *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_disable() - Disable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_disable(struct fsl_mc_io *mc_io,
+		    u32 cmd_flags,
+		    u16 token,
+		    u16 if_id)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_if *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_attributes() - Function obtains attributes of interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @attr: Returned interface attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u16 if_id,
+			   struct dpsw_if_attr *attr)
+{
+	struct dpsw_rsp_if_get_attr *rsp_params;
+	struct dpsw_cmd_if *cmd_params;
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_if *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
+	attr->num_tcs = rsp_params->num_tcs;
+	attr->rate = le32_to_cpu(rsp_params->rate);
+	attr->options = le32_to_cpu(rsp_params->options);
+	attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
+	attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
+					       ACCEPT_ALL_VLAN);
+	attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED);
+	attr->qdid = le16_to_cpu(rsp_params->qdid);
+
+	return 0;
+}
+
+/**
+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @frame_length: Maximum Frame Length
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
+				 u32 cmd_flags,
+				 u16 token,
+				 u16 if_id,
+				 u16 frame_length)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_if_set_max_frame_length *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	cmd_params->frame_length = cpu_to_le16(frame_length);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @frame_length: Returned maximum Frame Length
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
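+ *
+ * Illustrative usage (a sketch, not part of the original API notes):
+ *
+ *	u16 mfl;
+ *
+ *	err = dpsw_if_get_max_frame_length(mc_io, 0, token, if_id, &mfl);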
+ */
+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
+				 u32 cmd_flags,
+				 u16 token,
+				 u16 if_id,
+				 u16 *frame_length)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_if_get_max_frame_length *cmd_params;
+	struct dpsw_rsp_if_get_max_frame_length *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params;
+	*frame_length = le16_to_cpu(rsp_params->frame_length);
+
+	return 0;
+}
+
+/**
+ * dpsw_vlan_add() - Adding new VLAN to DPSW.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: VLAN configuration
+ *
+ * Only VLAN ID and FDB ID are required parameters here.
+ * The 12-bit VLAN ID is defined in IEEE 802.1Q.
+ * Adding a duplicate VLAN ID is not allowed.
+ * An FDB ID can be shared across multiple VLANs. Shared learning
+ * is obtained by calling dpsw_vlan_add() for multiple VLAN IDs
+ * with the same fdb_id
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
+		  u32 cmd_flags,
+		  u16 token,
+		  u16 vlan_id,
+		  const struct dpsw_vlan_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_vlan_add *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_vlan_add *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces to add
+ *
+ * Only interfaces that do not yet belong to this VLAN are added;
+ * otherwise an error is generated and the entire command is
+ * ignored. This function can be called numerous times, each time
+ * providing only the delta of interfaces.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     u16 vlan_id,
+		     const struct dpsw_vlan_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
+ *				transmitted as untagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be transmitted as untagged
+ *
+ * These interfaces should already belong to this VLAN.
+ * By default all interfaces are transmitted as tagged.
+ * Providing a nonexistent interface, or an interface that is already
+ * configured as untagged, generates an error and the entire
+ * command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 vlan_id,
+			      const struct dpsw_vlan_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
+ *			included in flooding when a frame with an unknown
+ *			destination unicast MAC arrives.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be used for flooding
+ *
+ * These interfaces should belong to this VLAN. By default all
+ * interfaces are included in the flooding list. Providing a
+ * nonexistent interface or an interface that is already in the
+ * flooding list generates an error and the entire command is
+ * ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 vlan_id,
+			      const struct dpsw_vlan_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * Interfaces must belong to this VLAN, otherwise an error
+ * is returned and the command is ignored
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u16 vlan_id,
+			const struct dpsw_vlan_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should
+ *		be converted from being transmitted as untagged to being
+ *		transmitted as tagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * Interfaces provided to this API have to belong to this VLAN and
+ * be configured as untagged, otherwise an error is returned and the
+ * command is ignored
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
+				 u32 cmd_flags,
+				 u16 token,
+				 u16 vlan_id,
+				 const struct dpsw_vlan_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
+ *				removed from the flooding list.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces used for flooding
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
+				 u32 cmd_flags,
+				 u16 token,
+				 u16 vlan_id,
+				 const struct dpsw_vlan_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove() - Remove an entire VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     u16 vlan_id)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_remove *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_get_attributes() - Get VLAN attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @attr: Returned DPSW attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
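+ *
+ * Illustrative usage (a sketch, not part of the original API notes):
+ *
+ *	struct dpsw_vlan_attr attr;
+ *
+ *	err = dpsw_vlan_get_attributes(mc_io, 0, token, vlan_id, &attr);
+ *	if (!err)
+ *		pr_debug("fdb %u, %u ifs\n", attr.fdb_id, attr.num_ifs);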
+ */
+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     u16 vlan_id,
+			     struct dpsw_vlan_attr *attr)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_get_attr *cmd_params;
+	struct dpsw_rsp_vlan_get_attr *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
+	attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
+	attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+	attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
+	attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
+
+	return 0;
+}
+
+/**
+ * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Returned set of interfaces belonging to this VLAN
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     u16 vlan_id,
+		     struct dpsw_vlan_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_get_if *cmd_params;
+	struct dpsw_rsp_vlan_get_if *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
+	cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+	read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
+
+	return 0;
+}
+
+/**
+ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Returned set of flooding interfaces
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+
+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 vlan_id,
+			      struct dpsw_vlan_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
+	struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
+	cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+	read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
+
+	return 0;
+}
+
+/**
+ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
+ *				untagged
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Returned set of untagged interfaces
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 vlan_id,
+			      struct dpsw_vlan_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
+	struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
+	cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
+	cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+	read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
+
+	return 0;
+}
+
+/**
+ * dpsw_fdb_add() - Add an FDB to the switch and return a handle to its
+ *		table for later reference
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Returned Forwarding Database Identifier
+ * @cfg: FDB Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 u16 *fdb_id,
+		 const struct dpsw_fdb_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_add *cmd_params;
+	struct dpsw_rsp_fdb_add *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
+	cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
+	cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
+	*fdb_id = le16_to_cpu(rsp_params->fdb_id);
+
+	return 0;
+}
+
+/**
+ * dpsw_fdb_remove() - Remove FDB from switch
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
+		    u32 cmd_flags,
+		    u16 token,
+		    u16 fdb_id)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_remove *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(fdb_id);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u16 fdb_id,
+			 const struct dpsw_fdb_unicast_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_add_unicast *cmd_params;
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(fdb_id);
+	cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+	for (i = 0; i < 6; i++)
+		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+	dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
+ *				unicast Ethernet address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Returned unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u16 fdb_id,
+			 struct dpsw_fdb_unicast_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_get_unicast *cmd_params;
+	struct dpsw_rsp_fdb_get_unicast *rsp_params;
+	int err, i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(fdb_id);
+	for (i = 0; i < 6; i++)
+		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
+	cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
+	cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
+
+	return 0;
+}
+
+/**
+ * dpsw_fdb_remove_unicast() - Remove a unicast entry from the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
+			    u32 cmd_flags,
+			    u16 token,
+			    u16 fdb_id,
+			    const struct dpsw_fdb_unicast_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_remove_unicast *cmd_params;
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(fdb_id);
+	for (i = 0; i < 6; i++)
+		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+	cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+	dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast
+ *				group
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * If the group doesn't exist, it is created.
+ * Only interfaces that do not yet belong to this multicast group
+ * are added; otherwise an error is generated and the command is
+ * ignored.
+ * This function may be called numerous times, each time providing
+ * only the delta of interfaces.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u16 fdb_id,
+			   const struct dpsw_fdb_multicast_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_add_multicast *cmd_params;
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(fdb_id);
+	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+	dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+	for (i = 0; i < 6; i++)
+		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_get_multicast() - Read a multicast group by its multicast
+ *				Ethernet address.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Returned multicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
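+ *
+ * Illustrative usage (a sketch, not part of the original API notes; the
+ * group MAC address below is an arbitrary example and acts as the
+ * lookup key):
+ *
+ *	struct dpsw_fdb_multicast_cfg cfg = {
+ *		.mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
+ *	};
+ *
+ *	err = dpsw_fdb_get_multicast(mc_io, 0, token, fdb_id, &cfg);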
+ */
+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u16 fdb_id,
+			   struct dpsw_fdb_multicast_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_get_multicast *cmd_params;
+	struct dpsw_rsp_fdb_get_multicast *rsp_params;
+	int err, i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(fdb_id);
+	for (i = 0; i < 6; i++)
+		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
+	cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+	cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
+	read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
+
+	return 0;
+}
+
+/**
+ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
+ *				group.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * Interfaces provided by this API have to exist in the group,
+ * otherwise an error is returned and the entire command is
+ * ignored. If no interface is left in the group,
+ * the entire group is deleted
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
+			      u32 cmd_flags,
+			      u16 token,
+			      u16 fdb_id,
+			      const struct dpsw_fdb_multicast_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_remove_multicast *cmd_params;
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(fdb_id);
+	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+	dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+	for (i = 0; i < 6; i++)
+		cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @mode: Learning mode
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
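+ *
+ * Illustrative usage (a sketch, not part of the original API notes;
+ * DPSW_FDB_LEARNING_MODE_DIS is assumed to be one of the values of
+ * enum dpsw_fdb_learning_mode declared elsewhere in dpsw.h), e.g.
+ * turning address learning off:
+ *
+ *	err = dpsw_fdb_set_learning_mode(mc_io, 0, token, fdb_id,
+ *					 DPSW_FDB_LEARNING_MODE_DIS);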
+ */
+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
+			       u32 cmd_flags,
+			       u16 token,
+			       u16 fdb_id,
+			       enum dpsw_fdb_learning_mode mode)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(fdb_id);
+	dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_get_attributes() - Get FDB attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @attr: Returned FDB attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
+			    u32 cmd_flags,
+			    u16 token,
+			    u16 fdb_id,
+			    struct dpsw_fdb_attr *attr)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_fdb_get_attr *cmd_params;
+	struct dpsw_rsp_fdb_get_attr *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params;
+	cmd_params->fdb_id = cpu_to_le16(fdb_id);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params;
+	attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
+	attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
+	attr->learning_mode = dpsw_get_field(rsp_params->learning_mode,
+					     LEARNING_MODE);
+	attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups);
+	attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
+
+	return 0;
+}
+
+/**
+ * dpsw_acl_add() - Add ACL to L2 switch.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: Returned ACL ID, for future reference
+ * @cfg: ACL configuration
+ *
+ * Create an Access Control List. Multiple ACLs can be created and
+ * co-exist in the L2 switch
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 u16 *acl_id,
+		 const struct dpsw_acl_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_acl_add *cmd_params;
+	struct dpsw_rsp_acl_add *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
+	cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
+	*acl_id = le16_to_cpu(rsp_params->acl_id);
+
+	return 0;
+}
+
+/**
+ * dpsw_acl_remove() - Removes ACL from L2 switch.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
+		    u32 cmd_flags,
+		    u16 token,
+		    u16 acl_id)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_acl_remove *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
+	cmd_params->acl_id = cpu_to_le16(acl_id);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry configuration.
+ * @key: Key
+ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
+ *
+ * This function has to be called before adding or removing an ACL entry
+ *
+ */
+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
+				u8 *entry_cfg_buf)
+{
+	struct dpsw_prep_acl_entry *ext_params;
+	int i;
+
+	ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
+
+	for (i = 0; i < 6; i++) {
+		ext_params->match_l2_dest_mac[i] =
+			key->match.l2_dest_mac[5 - i];
+		ext_params->match_l2_source_mac[i] =
+			key->match.l2_source_mac[5 - i];
+		ext_params->mask_l2_dest_mac[i] =
+			key->mask.l2_dest_mac[5 - i];
+		ext_params->mask_l2_source_mac[i] =
+			key->mask.l2_source_mac[5 - i];
+	}
+
+	ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
+	ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
+	ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
+	ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
+	ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
+	ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
+	ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
+	ext_params->match_l3_dscp = key->match.l3_dscp;
+	ext_params->match_l4_source_port =
+		cpu_to_le16(key->match.l4_source_port);
+
+	ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
+	ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
+	ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
+	ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
+	ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
+	ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
+	ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
+	ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
+	ext_params->mask_l3_dscp = key->mask.l3_dscp;
+	ext_params->match_l3_protocol = key->match.l3_protocol;
+	ext_params->mask_l3_protocol = key->mask.l3_protocol;
+}
+
+/**
+ * dpsw_acl_add_entry() - Adds an entry to ACL.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
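+ *
+ * Illustrative flow (a sketch, not part of the original API notes;
+ * 'dev' is assumed to be the device used for the DMA mapping and
+ * 'key' a filled-in struct dpsw_acl_key):
+ *
+ *	u8 *buf = kzalloc(256, GFP_KERNEL);
+ *	struct dpsw_acl_entry_cfg cfg = { .precedence = 0 };
+ *
+ *	dpsw_acl_prepare_entry_cfg(&key, buf);
+ *	cfg.key_iova = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);
+ *	err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &cfg);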
+ */
+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u16 acl_id,
+		       const struct dpsw_acl_entry_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_acl_entry *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+	cmd_params->acl_id = cpu_to_le16(acl_id);
+	cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+	cmd_params->precedence = cpu_to_le32(cfg->precedence);
+	dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
+		       cfg->result.action);
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_remove_entry() - Removes an entry from ACL.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u16 acl_id,
+			  const struct dpsw_acl_entry_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_acl_entry *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+	cmd_params->acl_id = cpu_to_le16(acl_id);
+	cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+	cmd_params->precedence = cpu_to_le32(cfg->precedence);
+	dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
+		       cfg->result.action);
+	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_add_if() - Associate interface/interfaces with ACL.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
+		    u32 cmd_flags,
+		    u16 token,
+		    u16 acl_id,
+		    const struct dpsw_acl_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_acl_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+	cmd_params->acl_id = cpu_to_le16(acl_id);
+	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u16 acl_id,
+		       const struct dpsw_acl_if_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_acl_if *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+	cmd_params->acl_id = cpu_to_le16(acl_id);
+	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+	build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_get_attributes() - Get ACL attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL Identifier
+ * @attr: Returned ACL attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
+			    u32 cmd_flags,
+			    u16 token,
+			    u16 acl_id,
+			    struct dpsw_acl_attr *attr)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_acl_get_attr *cmd_params;
+	struct dpsw_rsp_acl_get_attr *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
+	cmd_params->acl_id = cpu_to_le16(acl_id);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
+	attr->max_entries = le16_to_cpu(rsp_params->max_entries);
+	attr->num_entries = le16_to_cpu(rsp_params->num_entries);
+	attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+
+	return 0;
+}
+
+/**
+ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @attr: Returned control interface attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
+				u32 cmd_flags,
+				u16 token,
+				struct dpsw_ctrl_if_attr *attr)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
+	attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
+	attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
+	attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
+
+	return 0;
+}
+
+/**
+ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @pools: Buffer pools configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
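+ *
+ * Illustrative usage (a sketch, not part of the original API notes; the
+ * DPBP object id and buffer size below are arbitrary examples):
+ *
+ *	struct dpsw_ctrl_if_pools_cfg pools = {
+ *		.num_dpbp = 1,
+ *		.pools = { { .dpbp_id = 0, .buffer_size = 2048 } },
+ *	};
+ *
+ *	err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools);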
+ */
+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   const struct dpsw_ctrl_if_pools_cfg *pools)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
+	int i;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
+	cmd_params->num_dpbp = pools->num_dpbp;
+	for (i = 0; i < 8; i++) {
+		cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
+						       i,
+						       pools->pools[i].backup_pool);
+		cmd_params->buffer_size[i] =
+			cpu_to_le16(pools->pools[i].buffer_size);
+		cmd_params->dpbp_id[i] =
+			cpu_to_le32(pools->pools[i].dpbp_id);
+	}
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_enable() - Enable control interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_disable() - Function disables control interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc */
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_api_version() - Get Data Path Switch API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path switch API
+ * @minor_ver: Minor version of data path switch API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver)
+{
+	struct mc_command cmd = { 0 };
+	struct dpsw_rsp_get_api_version *rsp_params;
+	int err;
+
+	cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
+					  cmd_flags,
+					  0);
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
+	*major_ver = le16_to_cpu(rsp_params->version_major);
+	*minor_ver = le16_to_cpu(rsp_params->version_minor);
+
+	return 0;
+}
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
@@ -0,0 +1,1269 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPSW_H
+#define __FSL_DPSW_H
+
+/* Data Path L2-Switch API
+ * Contains API for handling DPSW topology and functionality
+ */
+
+struct fsl_mc_io;
+
+/**
+ * DPSW general definitions
+ */
+
+/**
+ * Maximum number of traffic class priorities
+ */
+#define DPSW_MAX_PRIORITIES	8
+/**
+ * Maximum number of interfaces
+ */
+#define DPSW_MAX_IF		64
+
+int dpsw_open(struct fsl_mc_io *mc_io,
+	      u32 cmd_flags,
+	      int dpsw_id,
+	      u16 *token);
+
+int dpsw_close(struct fsl_mc_io *mc_io,
+	       u32 cmd_flags,
+	       u16 token);
+
+/**
+ * DPSW options
+ */
+
+/**
+ * Disable flooding
+ */
+#define DPSW_OPT_FLOODING_DIS		0x0000000000000001ULL
+/**
+ * Disable Multicast
+ */
+#define DPSW_OPT_MULTICAST_DIS		0x0000000000000004ULL
+/**
+ * Disable control interface
+ */
+#define DPSW_OPT_CTRL_IF_DIS		0x0000000000000010ULL
+/**
+ * Disable flooding metering
+ */
+#define DPSW_OPT_FLOODING_METERING_DIS	0x0000000000000020ULL
+/**
+ * Enable metering
+ */
+#define DPSW_OPT_METERING_EN		0x0000000000000040ULL
+
+/**
+ * enum dpsw_component_type - component type of a bridge
+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
+ *	enterprise VLAN bridge or of a Provider Bridge used
+ *	to process C-tagged frames
+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
+ *	Provider Bridge
+ *
+ */
+enum dpsw_component_type {
+	DPSW_COMPONENT_TYPE_C_VLAN = 0,
+	DPSW_COMPONENT_TYPE_S_VLAN
+};
+
+/**
+ * struct dpsw_cfg - DPSW configuration
+ * @num_ifs: Number of external and internal interfaces
+ * @adv: Advanced parameters; default is all zeros;
+ *	use this structure to change default settings
+ */
+struct dpsw_cfg {
+	u16 num_ifs;
+	/**
+	 * struct adv - Advanced parameters
+	 * @options: Enable/Disable DPSW features (bitmap)
+	 * @max_vlans: Maximum number of VLANs; 0 - indicates default 16
+	 * @max_meters_per_if: Number of meters per interface
+	 * @max_fdbs: Maximum number of FDBs; 0 - indicates default 16
+	 * @max_fdb_entries: Number of FDB entries for default FDB table;
+	 *	0 - indicates default 1024 entries.
+	 * @fdb_aging_time: Default FDB aging time for default FDB table;
+	 *	0 - indicates default 300 seconds
+	 * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
+	 *	0 - indicates default 32
+	 * @component_type: Indicates the component type of this bridge
+	 */
+	struct {
+		u64 options;
+		u16 max_vlans;
+		u8 max_meters_per_if;
+		u8 max_fdbs;
+		u16 max_fdb_entries;
+		u16 fdb_aging_time;
+		u16 max_fdb_mc_groups;
+		enum dpsw_component_type component_type;
+	} adv;
+};
+
+int dpsw_create(struct fsl_mc_io *mc_io,
+		u16 dprc_token,
+		u32 cmd_flags,
+		const struct dpsw_cfg *cfg,
+		u32 *obj_id);
+
+int dpsw_destroy(struct fsl_mc_io *mc_io,
+		 u16 dprc_token,
+		 u32 cmd_flags,
+		 u32 object_id);
+
+int dpsw_enable(struct fsl_mc_io *mc_io,
+		u32 cmd_flags,
+		u16 token);
+
+int dpsw_disable(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token);
+
+int dpsw_is_enabled(struct fsl_mc_io *mc_io,
+		    u32 cmd_flags,
+		    u16 token,
+		    int *en);
+
+int dpsw_reset(struct fsl_mc_io *mc_io,
+	       u32 cmd_flags,
+	       u16 token);
+
+/**
+ * DPSW IRQ Index and Events
+ */
+
+#define DPSW_IRQ_INDEX_IF	0x0000
+#define DPSW_IRQ_INDEX_L2SW	0x0001
+
+/**
+ * IRQ event - Indicates that the link state changed
+ */
+#define DPSW_IRQ_EVENT_LINK_CHANGED	0x0001
+
+/**
+ * struct dpsw_irq_cfg - IRQ configuration
+ * @addr:	Address that must be written to signal a message-based interrupt
+ * @val:	Value to write into irq_addr address
+ * @irq_num:	A user defined number associated with this IRQ
+ */
+struct dpsw_irq_cfg {
+	u64 addr;
+	u32 val;
+	int irq_num;
+};
+
+int dpsw_set_irq(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 u8 irq_index,
+		 struct dpsw_irq_cfg *irq_cfg);
+
+int dpsw_get_irq(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 u8 irq_index,
+		 int *type,
+		 struct dpsw_irq_cfg *irq_cfg);
+
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u8 en);
+
+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u8 *en);
+
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 irq_index,
+		      u32 mask);
+
+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 irq_index,
+		      u32 *mask);
+
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u32 *status);
+
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u8 irq_index,
+			  u32 status);
+
+/**
+ * struct dpsw_attr - Structure representing DPSW attributes
+ * @id: DPSW object ID
+ * @options: Enable/Disable DPSW features
+ * @max_vlans: Maximum Number of VLANs
+ * @max_meters_per_if: Number of meters per interface
+ * @max_fdbs: Maximum Number of FDBs
+ * @max_fdb_entries: Number of FDB entries for default FDB table;
+ *	0 - indicates default 1024 entries.
+ * @fdb_aging_time: Default FDB aging time for default FDB table; + * 0 - indicates default 300 seconds + * @max_fdb_mc_groups: Number of multicast groups in each FDB table; + * 0 - indicates default 32 + * @mem_size: DPSW frame storage memory size + * @num_ifs: Number of interfaces + * @num_vlans: Current number of VLANs + * @num_fdbs: Current number of FDBs + * @component_type: Component type of this bridge + */ +struct dpsw_attr { + int id; + u64 options; + u16 max_vlans; + u8 max_meters_per_if; + u8 max_fdbs; + u16 max_fdb_entries; + u16 fdb_aging_time; + u16 max_fdb_mc_groups; + u16 num_ifs; + u16 mem_size; + u16 num_vlans; + u8 num_fdbs; + enum dpsw_component_type component_type; +}; + +int dpsw_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpsw_attr *attr); + +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id); + +/** + * enum dpsw_action - Action selection for special/control frames + * @DPSW_ACTION_DROP: Drop frame + * @DPSW_ACTION_REDIRECT: Redirect frame to control port + */ +enum dpsw_action { + DPSW_ACTION_DROP = 0, + DPSW_ACTION_REDIRECT = 1 +}; + +/** + * Enable auto-negotiation + */ +#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL +/** + * Enable half-duplex mode + */ +#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +/** + * Enable pause frames + */ +#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL +/** + * Enable asymmetric pause frames + */ +#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL + +/** + * struct dpsw_link_cfg - Structure representing DPSW link configuration + * @rate: Rate + * @options: Mask of available options; use 'DPSW_LINK_OPT_' values + */ +struct dpsw_link_cfg { + u32 rate; + u64 options; +}; + +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpsw_link_cfg *cfg); +/** + * struct dpsw_link_state - Structure representing DPSW link state + * @rate: Rate + * @options: Mask of available options; use 'DPSW_LINK_OPT_' values + * @up: 0 - covers two cases: down and disconnected, 1 - up + */ +struct dpsw_link_state { + u32 rate; + u64 options; + int up; +}; + +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpsw_link_state *state); + +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + int en); + +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + int en); + +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + int en); + +/** + * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration + * @pcp: Priority Code Point (PCP): a 3-bit field which refers + * to the IEEE 802.1p priority + * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used + * separately or in conjunction with PCP to indicate frames + * eligible to be dropped in the presence of congestion + * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN + * to which the frame belongs.
The hexadecimal values + * of 0x000 and 0xFFF are reserved; + * all other values may be used as VLAN identifiers, + * allowing up to 4,094 VLANs + */ +struct dpsw_tci_cfg { + u8 pcp; + u8 dei; + u16 vlan_id; +}; + +int dpsw_if_set_tci(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_tci_cfg *cfg); + +int dpsw_if_get_tci(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpsw_tci_cfg *cfg); + +/** + * enum dpsw_stp_state - Spanning Tree Protocol (STP) states + * @DPSW_STP_STATE_BLOCKING: Blocking state + * @DPSW_STP_STATE_LISTENING: Listening state + * @DPSW_STP_STATE_LEARNING: Learning state + * @DPSW_STP_STATE_FORWARDING: Forwarding state + * + */ +enum dpsw_stp_state { + DPSW_STP_STATE_BLOCKING = 0, + DPSW_STP_STATE_LISTENING = 1, + DPSW_STP_STATE_LEARNING = 2, + DPSW_STP_STATE_FORWARDING = 3 +}; + +/** + * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration + * @vlan_id: VLAN ID for this STP state + * @state: STP state + */ +struct dpsw_stp_cfg { + u16 vlan_id; + enum dpsw_stp_state state; +}; + +int dpsw_if_set_stp(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_stp_cfg *cfg); + +/** + * enum dpsw_accepted_frames - Types of frames to accept + * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and + * priority tagged frames + * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or + * priority-tagged frames received on this interface. + * + */ +enum dpsw_accepted_frames { + DPSW_ADMIT_ALL = 1, + DPSW_ADMIT_ONLY_VLAN_TAGGED = 3 +}; + +/** + * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration + * @type: Defines ingress accepted frames + * @unaccept_act: When a frame is not accepted, it may be discarded or + * redirected to control interface depending on this mode + */ +struct dpsw_accepted_frames_cfg { + enum dpsw_accepted_frames type; + enum dpsw_action unaccept_act; +}; + +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_accepted_frames_cfg *cfg); + +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + int accept_all); + +/** + * enum dpsw_counter - Counters types + * @DPSW_CNT_ING_FRAME: Counts ingress frames + * @DPSW_CNT_ING_BYTE: Counts ingress bytes + * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames + * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames + * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames + * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes + * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames + * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes + * @DPSW_CNT_EGR_FRAME: Counts egress frames + * @DPSW_CNT_EGR_BYTE: Counts egress bytes + * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames + * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames + */ +enum dpsw_counter { + DPSW_CNT_ING_FRAME = 0x0, + DPSW_CNT_ING_BYTE = 0x1, + DPSW_CNT_ING_FLTR_FRAME = 0x2, + DPSW_CNT_ING_FRAME_DISCARD = 0x3, + DPSW_CNT_ING_MCAST_FRAME = 0x4, + DPSW_CNT_ING_MCAST_BYTE = 0x5, + DPSW_CNT_ING_BCAST_FRAME = 0x6, + DPSW_CNT_ING_BCAST_BYTES = 0x7, + DPSW_CNT_EGR_FRAME = 0x8, + DPSW_CNT_EGR_BYTE = 0x9, + DPSW_CNT_EGR_FRAME_DISCARD = 0xa, + DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb +}; + +int dpsw_if_get_counter(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + enum dpsw_counter type, + u64 *counter); + +int
dpsw_if_set_counter(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + enum dpsw_counter type, + u64 counter); + +/** + * Maximum number of TCs + */ +#define DPSW_MAX_TC 8 + +/** + * enum dpsw_priority_selector - User priority + * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which + * refers to the IEEE 802.1p priority. + * @DPSW_UP_DSCP: Differentiated Services Code Point (DSCP): a 6-bit + * field from the IP header + * + */ +enum dpsw_priority_selector { + DPSW_UP_PCP = 0, + DPSW_UP_DSCP = 1 +}; + +/** + * enum dpsw_schedule_mode - Traffic classes scheduling + * @DPSW_SCHED_STRICT_PRIORITY: strict priority scheduling + * @DPSW_SCHED_WEIGHTED: weighted scheduling based on a token-bucket algorithm + */ +enum dpsw_schedule_mode { + DPSW_SCHED_STRICT_PRIORITY, + DPSW_SCHED_WEIGHTED +}; + +/** + * struct dpsw_tx_schedule_cfg - traffic class configuration + * @mode: Strict or weight-based scheduling + * @delta_bandwidth: Weighted bandwidth in the range 100 to 10000 + */ +struct dpsw_tx_schedule_cfg { + enum dpsw_schedule_mode mode; + u16 delta_bandwidth; +}; + +/** + * struct dpsw_tx_selection_cfg - Mapping user priority into traffic + * class configuration + * @priority_selector: Source for user priority regeneration + * @tc_id: The regenerated user priority that the incoming + * user priority is mapped to for this interface + * @tc_sched: Traffic classes configuration + */ +struct dpsw_tx_selection_cfg { + enum dpsw_priority_selector priority_selector; + u8 tc_id[DPSW_MAX_PRIORITIES]; + struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC]; +}; + +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_tx_selection_cfg *cfg); + +/** + * enum dpsw_reflection_filter - Filter type for frames to reflect + * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames + * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belonging to + * the VLAN defined by the vid parameter + * + */ +enum dpsw_reflection_filter { + DPSW_REFLECTION_FILTER_INGRESS_ALL = 0, + DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1 +}; + +/** + * struct dpsw_reflection_cfg - Structure representing reflection information + * @filter: Filter type for frames to reflect + * @vlan_id: VLAN ID to reflect; valid only when filter type is + * DPSW_REFLECTION_FILTER_INGRESS_VLAN + */ +struct dpsw_reflection_cfg { + enum dpsw_reflection_filter filter; + u16 vlan_id; +}; + +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_reflection_cfg *cfg); + +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_reflection_cfg *cfg); + +/** + * enum dpsw_metering_mode - Metering modes + * @DPSW_METERING_MODE_NONE: metering disabled + * @DPSW_METERING_MODE_RFC2698: RFC 2698 + * @DPSW_METERING_MODE_RFC4115: RFC 4115 + */ +enum dpsw_metering_mode { + DPSW_METERING_MODE_NONE = 0, + DPSW_METERING_MODE_RFC2698, + DPSW_METERING_MODE_RFC4115 +}; + +/** + * enum dpsw_metering_unit - Metering count + * @DPSW_METERING_UNIT_BYTES: count bytes + * @DPSW_METERING_UNIT_FRAMES: count frames + */ +enum dpsw_metering_unit { + DPSW_METERING_UNIT_BYTES = 0, + DPSW_METERING_UNIT_FRAMES +}; + +/** + * struct dpsw_metering_cfg - Metering configuration + * @mode: metering modes + * @units: Bytes or frame units + * @cir: Committed information rate (CIR) in Kbit/s + * @eir: Peak information rate (PIR) in Kbit/s for rfc2698 + * Excess information rate (EIR) in Kbit/s for rfc4115 + * @cbs: Committed burst size
(CBS) in bytes + * @ebs: Peak burst size (PBS) in bytes for rfc2698 + * Excess burst size (EBS) in bytes for rfc4115 + * + */ +struct dpsw_metering_cfg { + enum dpsw_metering_mode mode; + enum dpsw_metering_unit units; + u32 cir; + u32 eir; + u32 cbs; + u32 ebs; +}; + +int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpsw_metering_cfg *cfg); + +int dpsw_if_set_metering(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + u8 tc_id, + const struct dpsw_metering_cfg *cfg); + +/** + * enum dpsw_early_drop_unit - DPSW early drop unit + * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes + * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames + */ +enum dpsw_early_drop_unit { + DPSW_EARLY_DROP_UNIT_BYTE = 0, + DPSW_EARLY_DROP_UNIT_FRAMES +}; + +/** + * enum dpsw_early_drop_mode - DPSW early drop mode + * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled + * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode + * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode + */ +enum dpsw_early_drop_mode { + DPSW_EARLY_DROP_MODE_NONE = 0, + DPSW_EARLY_DROP_MODE_TAIL, + DPSW_EARLY_DROP_MODE_WRED +}; + +/** + * struct dpsw_wred_cfg - WRED configuration + * @max_threshold: maximum threshold at which packets may be discarded. Above + * this threshold all packets are discarded; must be less than + * 2^39; approximated as (x+256)*2^(y-1) due to the HW + * implementation. + * @min_threshold: minimum threshold at which packets may be discarded + * @drop_probability: probability that a packet will be discarded (1-100, + * associated with the maximum threshold) + */ +struct dpsw_wred_cfg { + u64 min_threshold; + u64 max_threshold; + u8 drop_probability; +}; + +/** + * struct dpsw_early_drop_cfg - early-drop configuration + * @drop_mode: drop mode + * @units: count units + * @yellow: WRED - 'yellow' configuration + * @green: WRED - 'green' configuration + * @tail_drop_threshold: tail drop threshold + */ +struct dpsw_early_drop_cfg { + enum dpsw_early_drop_mode drop_mode; + enum dpsw_early_drop_unit units; + struct dpsw_wred_cfg yellow; + struct dpsw_wred_cfg green; + u32 tail_drop_threshold; +}; + +void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg, + u8 *early_drop_buf); + +int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + u8 tc_id, + u64 early_drop_iova); + +/** + * struct dpsw_custom_tpid_cfg - Structure representing tag protocol identifier + * @tpid: An additional tag protocol identifier + */ +struct dpsw_custom_tpid_cfg { + u16 tpid; +}; + +int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpsw_custom_tpid_cfg *cfg); + +int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpsw_custom_tpid_cfg *cfg); + +int dpsw_if_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id); + +int dpsw_if_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id); + +/** + * struct dpsw_if_attr - Structure representing DPSW interface attributes + * @num_tcs: Number of traffic classes + * @rate: Transmit rate in bits per second + * @options: Interface configuration options (bitmap) + * @enabled: Indicates if interface is enabled + * @accept_all_vlan: The device discards/accepts incoming frames + * for VLANs that do not include this interface + * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device + * discards untagged frames or priority-tagged frames
received on + * this interface; + * When set to 'DPSW_ADMIT_ALL', untagged frames or priority- + * tagged frames received on this interface are accepted + * @qdid: control frames transmit qdid + */ +struct dpsw_if_attr { + u8 num_tcs; + u32 rate; + u32 options; + int enabled; + int accept_all_vlan; + enum dpsw_accepted_frames admit_untagged; + u16 qdid; +}; + +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpsw_if_attr *attr); + +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + u16 frame_length); + +int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + u16 *frame_length); + +/** + * struct dpsw_vlan_cfg - VLAN Configuration + * @fdb_id: Forwarding Data Base + */ +struct dpsw_vlan_cfg { + u16 fdb_id; +}; + +int dpsw_vlan_add(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + const struct dpsw_vlan_cfg *cfg); + +/** + * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces + * @num_ifs: The number of interfaces that are assigned to the egress + * list for this VLAN + * @if_id: The set of interfaces that are + * assigned to the egress list for this VLAN + */ +struct dpsw_vlan_if_cfg { + u16 num_ifs; + u16 if_id[DPSW_MAX_IF]; +}; + +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + const struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_remove(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id); + +/** + * struct dpsw_vlan_attr - VLAN attributes + * @fdb_id: Associated FDB ID + * @num_ifs: Number of interfaces + * @num_untagged_ifs: Number of untagged interfaces + * @num_flooding_ifs: Number of flooding interfaces + */ +struct dpsw_vlan_attr { + u16 fdb_id; + u16 num_ifs; + u16 num_untagged_ifs; + u16 num_flooding_ifs; +}; + +int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + struct dpsw_vlan_attr *attr); + +int dpsw_vlan_get_if(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + struct dpsw_vlan_if_cfg *cfg); + +int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 vlan_id, + struct dpsw_vlan_if_cfg *cfg); + +/** + * struct dpsw_fdb_cfg - FDB Configuration + * @num_fdb_entries: Number of FDB entries + * @fdb_aging_time: Aging time in seconds + */ +struct dpsw_fdb_cfg { + u16 num_fdb_entries; + u16 fdb_aging_time; +}; + +int dpsw_fdb_add(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *fdb_id, + const struct dpsw_fdb_cfg *cfg); + +int dpsw_fdb_remove(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 
fdb_id); + +/** + * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic + * @DPSW_FDB_ENTRY_STATIC: Static entry + * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry + */ +enum dpsw_fdb_entry_type { + DPSW_FDB_ENTRY_STATIC = 0, + DPSW_FDB_ENTRY_DINAMIC = 1 +}; + +/** + * struct dpsw_fdb_unicast_cfg - Unicast entry configuration + * @type: Select static or dynamic entry + * @mac_addr: MAC address + * @if_egress: Egress interface ID + */ +struct dpsw_fdb_unicast_cfg { + enum dpsw_fdb_entry_type type; + u8 mac_addr[6]; + u16 if_egress; +}; + +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 fdb_id, + const struct dpsw_fdb_unicast_cfg *cfg); + +int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 fdb_id, + struct dpsw_fdb_unicast_cfg *cfg); + +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 fdb_id, + const struct dpsw_fdb_unicast_cfg *cfg); + +/** + * struct dpsw_fdb_multicast_cfg - Multicast entry configuration + * @type: Select static or dynamic entry + * @mac_addr: MAC address + * @num_ifs: Number of external and internal interfaces + * @if_id: Egress interface IDs + */ +struct dpsw_fdb_multicast_cfg { + enum dpsw_fdb_entry_type type; + u8 mac_addr[6]; + u16 num_ifs; + u16 if_id[DPSW_MAX_IF]; +}; + +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 fdb_id, + const struct dpsw_fdb_multicast_cfg *cfg); + +int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 fdb_id, + struct dpsw_fdb_multicast_cfg *cfg); + +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 fdb_id, + const struct dpsw_fdb_multicast_cfg *cfg); + +/** + * enum dpsw_fdb_learning_mode - Auto-learning modes + * @DPSW_FDB_LEARNING_MODE_DIS: Disable auto-learning + * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-learning + * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU + * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU + * + * NON-SECURE LEARNING + * SMAC found DMAC found CTLU Action + * v v Forward frame to + * 1. DMAC destination + * - v Forward frame to + * 1. DMAC destination + * 2. Control interface + * v - Forward frame to + * 1. Flooding list of interfaces + * - - Forward frame to + * 1. Flooding list of interfaces + * 2. Control interface + * SECURE LEARNING + * SMAC found DMAC found CTLU Action + * v v Forward frame to + * 1. DMAC destination + * - v Forward frame to + * 1. Control interface + * v - Forward frame to + * 1. Flooding list of interfaces + * - - Forward frame to + * 1. Control interface + */ +enum dpsw_fdb_learning_mode { + DPSW_FDB_LEARNING_MODE_DIS = 0, + DPSW_FDB_LEARNING_MODE_HW = 1, + DPSW_FDB_LEARNING_MODE_NON_SECURE = 2, + DPSW_FDB_LEARNING_MODE_SECURE = 3 +}; + +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 fdb_id, + enum dpsw_fdb_learning_mode mode); + +/** + * struct dpsw_fdb_attr - FDB Attributes + * @max_fdb_entries: Number of FDB entries + * @fdb_aging_time: Aging time in seconds + * @learning_mode: Learning mode + * @num_fdb_mc_groups: Current number of multicast groups + * @max_fdb_mc_groups: Maximum number of multicast groups + */ +struct dpsw_fdb_attr { + u16 max_fdb_entries; + u16 fdb_aging_time; + enum dpsw_fdb_learning_mode learning_mode; + u16 num_fdb_mc_groups; + u16 max_fdb_mc_groups; +}; + +int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 fdb_id, + struct dpsw_fdb_attr *attr); + +/** + * struct dpsw_acl_cfg - ACL Configuration + * @max_entries: Maximum number of ACL entries + */ +struct dpsw_acl_cfg { + u16 max_entries; +}; + +/** + * struct dpsw_acl_fields - ACL fields. + * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast, + * slow protocols, MVRP, STP + * @l2_source_mac: Source MAC address + * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following + * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae, + * Q-in-Q, IPv4, IPv6, PPPoE + * @l2_pcp_dei: Priority Code Point (PCP) and Drop Eligible Indicator (DEI) + * from the VLAN tag + * @l2_vlan_id: layer 2 VLAN ID + * @l2_ether_type: layer 2 Ethernet type + * @l3_dscp: Layer 3 differentiated services code point + * @l3_protocol: Tells the network layer at the destination host which + * protocol this packet belongs to. The following protocols are + * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6 + * (encapsulation), GRE, PTP + * @l3_source_ip: Source IPv4 IP + * @l3_dest_ip: Destination IPv4 IP + * @l4_source_port: Source TCP/UDP Port + * @l4_dest_port: Destination TCP/UDP Port + */ +struct dpsw_acl_fields { + u8 l2_dest_mac[6]; + u8 l2_source_mac[6]; + u16 l2_tpid; + u8 l2_pcp_dei; + u16 l2_vlan_id; + u16 l2_ether_type; + u8 l3_dscp; + u8 l3_protocol; + u32 l3_source_ip; + u32 l3_dest_ip; + u16 l4_source_port; + u16 l4_dest_port; +}; + +/** + * struct dpsw_acl_key - ACL key + * @match: Match fields + * @mask: Mask: b'1 - valid, b'0 don't care + */ +struct dpsw_acl_key { + struct dpsw_acl_fields match; + struct dpsw_acl_fields mask; +}; + +/** + * enum dpsw_acl_action + * @DPSW_ACL_ACTION_DROP: Drop frame + * @DPSW_ACL_ACTION_REDIRECT: Redirect to a certain port + * @DPSW_ACL_ACTION_ACCEPT: Accept frame + * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface + */ +enum dpsw_acl_action { + DPSW_ACL_ACTION_DROP, + DPSW_ACL_ACTION_REDIRECT, + DPSW_ACL_ACTION_ACCEPT, + DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF +}; + +/** + * struct dpsw_acl_result - ACL action + * @action: Action to be taken when an ACL entry is hit + * @if_id: Interface ID to redirect the frame to; valid only if a redirect + * action is selected + */ +struct dpsw_acl_result { + enum dpsw_acl_action action; + u16 if_id; +}; + +/** + * struct dpsw_acl_entry_cfg - ACL entry + * @key_iova: I/O virtual address of DMA-able memory filled with key after call + * to dpsw_acl_prepare_entry_cfg() + * @result: Required action when entry hit occurs + * @precedence: Precedence inside the ACL; 0 is the lowest. This priority + * cannot change during the lifetime of a policy.
It is the user's responsibility to + * space the priorities to allow for subsequent rule additions. + */ +struct dpsw_acl_entry_cfg { + u64 key_iova; + struct dpsw_acl_result result; + int precedence; +}; + +int dpsw_acl_add(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *acl_id, + const struct dpsw_acl_cfg *cfg); + +int dpsw_acl_remove(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 acl_id); + +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key, + uint8_t *entry_cfg_buf); + +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 acl_id, + const struct dpsw_acl_entry_cfg *cfg); + +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 acl_id, + const struct dpsw_acl_entry_cfg *cfg); + +/** + * struct dpsw_acl_if_cfg - List of interfaces to associate with an ACL + * @num_ifs: Number of interfaces + * @if_id: List of interfaces + */ +struct dpsw_acl_if_cfg { + u16 num_ifs; + u16 if_id[DPSW_MAX_IF]; +}; + +int dpsw_acl_add_if(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 acl_id, + const struct dpsw_acl_if_cfg *cfg); + +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 acl_id, + const struct dpsw_acl_if_cfg *cfg); + +/** + * struct dpsw_acl_attr - ACL Attributes + * @max_entries: Max number of ACL entries + * @num_entries: Number of used ACL entries + * @num_ifs: Number of interfaces associated with ACL + */ +struct dpsw_acl_attr { + u16 max_entries; + u16 num_entries; + u16 num_ifs; +}; + +int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 acl_id, + struct dpsw_acl_attr *attr); +/** + * struct dpsw_ctrl_if_attr - Control interface attributes + * @rx_fqid: Receive FQID + * @rx_err_fqid: Receive error FQID + * @tx_err_conf_fqid: Transmit error and confirmation FQID + */ +struct dpsw_ctrl_if_attr { + u32 rx_fqid; + u32 rx_err_fqid; + u32 tx_err_conf_fqid; +}; + +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpsw_ctrl_if_attr *attr); + +/** + * Maximum number of DPBP + */ +#define DPSW_MAX_DPBP 8 + +/** + * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration + * @num_dpbp: Number of DPBPs + * @pools: Array of buffer pools parameters; The number of valid entries + * must match 'num_dpbp' value + */ +struct dpsw_ctrl_if_pools_cfg { + u8 num_dpbp; + /** + * struct pools - Buffer pools parameters + * @dpbp_id: DPBP object ID + * @buffer_size: Buffer size + * @backup_pool: Backup pool + */ + struct { + int dpbp_id; + u16 buffer_size; + int backup_pool; + } pools[DPSW_MAX_DPBP]; +}; + +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpsw_ctrl_if_pools_cfg *cfg); + +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpsw_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver); + +#endif /* __FSL_DPSW_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c @@ -0,0 +1,1857 @@ +/* Copyright 2014-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/module.h> +#include <linux/msi.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> + +#include <uapi/linux/if_bridge.h> +#include <net/netlink.h> + +#include "../../fsl-mc/include/mc.h" +#include "dpsw.h" +#include "dpsw-cmd.h" + +static const char ethsw_drv_version[] = "0.1"; + +/* Minimal supported DPSW version */ +#define DPSW_MIN_VER_MAJOR 8 +#define DPSW_MIN_VER_MINOR 0 + +/* IRQ index */ +#define DPSW_MAX_IRQ_NUM 2 + +#define ETHSW_VLAN_MEMBER 1 +#define ETHSW_VLAN_UNTAGGED 2 +#define ETHSW_VLAN_PVID 4 +#define ETHSW_VLAN_GLOBAL 8 + +/* Maximum Frame Length supported by HW (currently 10k) */ +#define DPAA2_MFL (10 * 1024) +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN) + +struct ethsw_port_priv { + struct net_device *netdev; + struct list_head list; + u16 port_index; + struct ethsw_dev_priv *ethsw_priv; + u8 stp_state; + + char vlans[VLAN_VID_MASK + 1]; + +}; + +struct ethsw_dev_priv { + struct net_device *netdev; + struct fsl_mc_io *mc_io; + u16 dpsw_handle; + struct dpsw_attr sw_attr; + int dev_id; + /* TODO: redundant, we can use the slave dev list */ + struct list_head port_list; + + bool flood; + bool learning; + + char vlans[VLAN_VID_MASK + 1]; +}; + +static int ethsw_port_stop(struct net_device *netdev); +static int ethsw_port_open(struct net_device *netdev); + +static inline void __get_priv(struct net_device *netdev, + struct ethsw_dev_priv **priv, + struct ethsw_port_priv **port_priv) +{ + struct ethsw_dev_priv *_priv = NULL; + struct ethsw_port_priv *_port_priv = NULL; + + if (netdev->flags & IFF_MASTER) { + _priv = netdev_priv(netdev); + } else { + _port_priv = netdev_priv(netdev); + _priv = _port_priv->ethsw_priv; + } + + if (priv) + *priv = _priv; + if (port_priv) + *port_priv = _port_priv; +} + +/* -------------------------------------------------------------------------- */ +/* ethsw netdevice ops */ + +static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev) +{ + /* we don't support I/O for now, drop the frame */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int
ethsw_open(struct net_device *netdev) +{ + struct ethsw_dev_priv *priv = netdev_priv(netdev); + struct list_head *pos; + struct ethsw_port_priv *port_priv = NULL; + int err; + + err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle); + if (err) { + netdev_err(netdev, "dpsw_enable err %d\n", err); + return err; + } + + list_for_each(pos, &priv->port_list) { + port_priv = list_entry(pos, struct ethsw_port_priv, list); + err = dev_open(port_priv->netdev); + if (err) + netdev_err(port_priv->netdev, "dev_open err %d\n", err); + } + + return 0; +} + +static int ethsw_stop(struct net_device *netdev) +{ + struct ethsw_dev_priv *priv = netdev_priv(netdev); + struct list_head *pos; + struct ethsw_port_priv *port_priv = NULL; + int err; + + err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle); + if (err) { + netdev_err(netdev, "dpsw_disable err %d\n", err); + return err; + } + + list_for_each(pos, &priv->port_list) { + port_priv = list_entry(pos, struct ethsw_port_priv, list); + err = dev_close(port_priv->netdev); + if (err) + netdev_err(port_priv->netdev, + "dev_close err %d\n", err); + } + + return 0; +} + +static int ethsw_add_vlan(struct net_device *netdev, u16 vid) +{ + struct ethsw_dev_priv *priv = netdev_priv(netdev); + int err; + + struct dpsw_vlan_cfg vcfg = { + /* TODO: add support for VLAN private FDBs */ + .fdb_id = 0, + }; + if (priv->vlans[vid]) { + netdev_err(netdev, "VLAN already configured\n"); + return -EEXIST; + } + + err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg); + if (err) { + netdev_err(netdev, "dpsw_vlan_add err %d\n", err); + return err; + } + priv->vlans[vid] = ETHSW_VLAN_MEMBER; + + return 0; +} + +static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_dev_priv *priv = port_priv->ethsw_priv; + int err; + + struct dpsw_vlan_if_cfg vcfg = { + .num_ifs = 1, + .if_id[0] = port_priv->port_index, + }; + + if (port_priv->vlans[vid]) { + netdev_err(netdev, "VLAN already configured\n"); + return -EEXIST; + } + + if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) { + netdev_err(netdev, "interface must be down to change PVID!\n"); + return -EBUSY; + } + + err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg); + if (err) { + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); + return err; + } + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; + + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { + err = dpsw_vlan_add_if_untagged(priv->mc_io, 0, + priv->dpsw_handle, vid, &vcfg); + if (err) { + netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n", + err); + return err; + } + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; + } + + if (flags & BRIDGE_VLAN_INFO_PVID) { + struct dpsw_tci_cfg tci_cfg = { + /* TODO: at least add better defaults if these cannot + * be configured + */ + .pcp = 0, + .dei = 0, + .vlan_id = vid, + }; + + err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle, + port_priv->port_index, &tci_cfg); + if (err) { + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); + return err; + } + port_priv->vlans[vid] |= ETHSW_VLAN_PVID; + } + + return 0; +} + +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = { + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 }, + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 }, + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY, + .len = sizeof(struct bridge_vlan_info), }, +}; + +static int ethsw_setlink_af_spec(struct net_device *netdev, + struct nlattr **tb) +{ + struct bridge_vlan_info *vinfo; + struct 
ethsw_dev_priv *priv = NULL; + struct ethsw_port_priv *port_priv = NULL; + int err = 0; + + if (!tb[IFLA_BRIDGE_VLAN_INFO]) { + netdev_err(netdev, "no VLAN INFO in nlmsg\n"); + return -EOPNOTSUPP; + } + + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); + + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK) + return -EINVAL; + + __get_priv(netdev, &priv, &port_priv); + + if (!port_priv || !priv->vlans[vinfo->vid]) { + /* command targets switch device or this is a new VLAN */ + err = ethsw_add_vlan(priv->netdev, vinfo->vid); + if (err) + return err; + + /* command targets switch device; mark it */ + if (!port_priv) + priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL; + } + + if (port_priv) { + /* command targets switch port */ + err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags); + if (err) + return err; + } + + return 0; +} + +static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { + [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, + [IFLA_BRPORT_COST] = { .type = NLA_U32 }, + [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, + [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, + [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, + [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, + [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, + [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, +}; + +static int ethsw_set_learning(struct net_device *netdev, u8 flag) +{ + struct ethsw_dev_priv *priv = netdev_priv(netdev); + enum dpsw_fdb_learning_mode learn_mode; + int err; + + if (flag) + learn_mode = DPSW_FDB_LEARNING_MODE_HW; + else + learn_mode = DPSW_FDB_LEARNING_MODE_DIS; + + err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, + 0, learn_mode); + if (err) { + netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err); + return err; + } + priv->learning = !!flag; + + return 0; +} + +static int ethsw_port_set_flood(struct net_device *netdev, u8 flag) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_dev_priv *priv = port_priv->ethsw_priv; + int err; + + err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle, + port_priv->port_index, (int)flag); + if (err) { + netdev_err(netdev, "dpsw_if_set_flooding err %d\n", err); + return err; + } + priv->flood = !!flag; + + return 0; +} + +static int ethsw_port_set_state(struct net_device *netdev, u8 state) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_dev_priv *priv = port_priv->ethsw_priv; + u8 old_state = port_priv->stp_state; + int err; + + struct dpsw_stp_cfg stp_cfg = { + .vlan_id = 1, + .state = state, + }; + /* TODO: check port state, interface may be down */ + + if (state > BR_STATE_BLOCKING) + return -EINVAL; + + if (state == port_priv->stp_state) + return 0; + + if (state == BR_STATE_DISABLED) { + port_priv->stp_state = state; + + err = ethsw_port_stop(netdev); + if (err) + goto error; + } else { + err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, + port_priv->port_index, &stp_cfg); + if (err) { + netdev_err(netdev, "dpsw_if_set_stp err %d\n", err); + return err; + } + + port_priv->stp_state = state; + + if (old_state == BR_STATE_DISABLED) { + err = ethsw_port_open(netdev); + if (err) + goto error; + } + } + + return 0; +error: + port_priv->stp_state = old_state; + return err; +} + +static int ethsw_setlink_protinfo(struct net_device *netdev, + struct nlattr **tb) +{ + struct ethsw_dev_priv *priv; + struct ethsw_port_priv *port_priv = NULL; + int err = 0; + + __get_priv(netdev, &priv, &port_priv); + + if (tb[IFLA_BRPORT_LEARNING]) { + u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
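+ + /* Learning is configured per FDB, and this driver uses only the + * default FDB (id 0), so the setting below applies to the whole + * switch even when the request arrives on a port netdev. + */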
+ if (port_priv) + netdev_warn(netdev, + "learning set on whole switch dev\n"); + + err = ethsw_set_learning(priv->netdev, flag); + if (err) + return err; + + } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) { + u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]); + + err = ethsw_port_set_flood(port_priv->netdev, flag); + if (err) + return err; + + } else if (tb[IFLA_BRPORT_STATE] && port_priv) { + u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]); + + err = ethsw_port_set_state(port_priv->netdev, state); + if (err) + return err; + + } else { + return -EOPNOTSUPP; + } + + return 0; +} + +static int ethsw_setlink(struct net_device *netdev, + struct nlmsghdr *nlh, + u16 flags) +{ + struct nlattr *attr; + struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ? + IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1]; + int err = 0; + + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (attr) { + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr, + ifla_br_policy); + if (err) { + netdev_err(netdev, + "nla_parse_nested for br_policy err %d\n", + err); + return err; + } + + err = ethsw_setlink_af_spec(netdev, tb); + return err; + } + + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO); + if (attr) { + err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr, + ifla_brport_policy); + if (err) { + netdev_err(netdev, + "nla_parse_nested for brport_policy err %d\n", + err); + return err; + } + + err = ethsw_setlink_protinfo(netdev, tb); + return err; + } + + netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n"); + return -EOPNOTSUPP; +} + +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev, + struct ethsw_dev_priv *priv) +{ + u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN; + int iflink; + int err; + + err = nla_put_string(skb, IFLA_IFNAME, netdev->name); + if (err) + goto nla_put_err; + err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex); + if (err) + goto nla_put_err; + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu); + if (err) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate); + if (err) + goto nla_put_err; + if (netdev->addr_len) { + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len, + netdev->dev_addr); + if (err) + goto nla_put_err; + } + + iflink = dev_get_iflink(netdev); + if (netdev->ifindex != iflink) { + err = nla_put_u32(skb, IFLA_LINK, iflink); + if (err) + goto nla_put_err; + } + + return 0; + +nla_put_err: + netdev_err(netdev, "nla_put_ err %d\n", err); + return err; +} + +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev, + struct ethsw_port_priv *port_priv) +{ + struct nlattr *nest; + int err; + + u8 stp_state = port_priv->stp_state; + + if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING) + stp_state = BR_STATE_BLOCKING; + + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); + if (!nest) { + netdev_err(netdev, "nla_nest_start failed\n"); + return -ENOMEM; + } + + err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state); + if (err) + goto nla_put_err; + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0); + if (err) + goto nla_put_err; + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0); + if (err) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0); + if (err) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0); + if (err) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0); + if (err) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0); + if (err) + goto nla_put_err; + err = nla_put_u8(skb,
IFLA_BRPORT_LEARNING, + port_priv->ethsw_priv->learning); + if (err) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, + port_priv->ethsw_priv->flood); + if (err) + goto nla_put_err; + nla_nest_end(skb, nest); + + return 0; + +nla_put_err: + netdev_err(netdev, "nla_put_ err %d\n", err); + nla_nest_cancel(skb, nest); + return err; +} + +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev, + struct ethsw_dev_priv *priv, + struct ethsw_port_priv *port_priv) +{ + struct nlattr *nest; + struct bridge_vlan_info vinfo; + const char *vlans; + u16 i; + int err; + + nest = nla_nest_start(skb, IFLA_AF_SPEC); + if (!nest) { + netdev_err(netdev, "nla_nest_start failed"); + return -ENOMEM; + } + + if (port_priv) + vlans = port_priv->vlans; + else + vlans = priv->vlans; + + for (i = 0; i < VLAN_VID_MASK + 1; i++) { + vinfo.flags = 0; + vinfo.vid = i; + + if (vlans[i] & ETHSW_VLAN_UNTAGGED) + vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED; + + if (vlans[i] & ETHSW_VLAN_PVID) + vinfo.flags |= BRIDGE_VLAN_INFO_PVID; + + if (vlans[i] & ETHSW_VLAN_MEMBER) { + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO, + sizeof(vinfo), &vinfo); + if (err) + goto nla_put_err; + } + } + + nla_nest_end(skb, nest); + + return 0; +nla_put_err: + netdev_err(netdev, "nla_put_ err %d\n", err); + nla_nest_cancel(skb, nest); + return err; +} + +static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *netdev, u32 filter_mask, + int nlflags) +{ + struct ethsw_dev_priv *priv; + struct ethsw_port_priv *port_priv = NULL; + struct ifinfomsg *hdr; + struct nlmsghdr *nlh; + int err; + + __get_priv(netdev, &priv, &port_priv); + + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + hdr = nlmsg_data(nlh); + memset(hdr, 0, sizeof(*hdr)); + hdr->ifi_family = AF_BRIDGE; + hdr->ifi_type = netdev->type; + hdr->ifi_index = netdev->ifindex; + hdr->ifi_flags = dev_get_flags(netdev); + + err = __nla_put_netdev(skb, netdev, priv); + if (err) + goto nla_put_err; + + if (port_priv) { + err = __nla_put_port(skb, netdev, port_priv); + if (err) + goto nla_put_err; + } + + /* Check if the VID information is requested */ + if (filter_mask & RTEXT_FILTER_BRVLAN) { + err = __nla_put_vlan(skb, netdev, priv, port_priv); + if (err) + goto nla_put_err; + } + + nlmsg_end(skb, nlh); + return skb->len; + +nla_put_err: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid) +{ + struct list_head *pos; + struct ethsw_port_priv *ppriv_local = NULL; + int err = 0; + + if (!priv->vlans[vid]) + return -ENOENT; + + err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid); + if (err) { + netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err); + return err; + } + priv->vlans[vid] = 0; + + list_for_each(pos, &priv->port_list) { + ppriv_local = list_entry(pos, struct ethsw_port_priv, + list); + ppriv_local->vlans[vid] = 0; + } + + return 0; +} + +static int ethsw_dellink_port(struct ethsw_dev_priv *priv, + struct ethsw_port_priv *port_priv, + u16 vid) +{ + struct list_head *pos; + struct ethsw_port_priv *ppriv_local = NULL; + struct dpsw_vlan_if_cfg vcfg = { + .num_ifs = 1, + .if_id[0] = port_priv->port_index, + }; + unsigned int count = 0; + int err = 0; + + if (!port_priv->vlans[vid]) + return -ENOENT; + + /* VLAN will be deleted from switch if global flag is not set + * and is configured on only one port + */ + if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) { + list_for_each(pos, 
&priv->port_list) { + ppriv_local = list_entry(pos, struct ethsw_port_priv, + list); + if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER) + count++; + } + + if (count == 1) + return ethsw_dellink_switch(priv, vid); + } + + err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle, + vid, &vcfg); + if (err) { + netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err); + return err; + } + port_priv->vlans[vid] = 0; + return 0; +} + +static int ethsw_dellink(struct net_device *netdev, + struct nlmsghdr *nlh, + u16 flags) +{ + struct nlattr *tb[IFLA_BRIDGE_MAX + 1]; + struct nlattr *spec; + struct bridge_vlan_info *vinfo; + struct ethsw_dev_priv *priv; + struct ethsw_port_priv *port_priv = NULL; + int err = 0; + + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!spec) + return 0; + + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy); + if (err) + return err; + + if (!tb[IFLA_BRIDGE_VLAN_INFO]) + return -EOPNOTSUPP; + + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); + + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK) + return -EINVAL; + + __get_priv(netdev, &priv, &port_priv); + + /* decide if command targets switch device or port */ + if (!port_priv) + err = ethsw_dellink_switch(priv, vinfo->vid); + else + err = ethsw_dellink_port(priv, port_priv, vinfo->vid); + + return err; +} + +static const struct net_device_ops ethsw_ops = { + .ndo_open = &ethsw_open, + .ndo_stop = &ethsw_stop, + + .ndo_bridge_setlink = &ethsw_setlink, + .ndo_bridge_getlink = &ethsw_getlink, + .ndo_bridge_dellink = &ethsw_dellink, + + .ndo_start_xmit = &ethsw_dropframe, +}; + +/* -------------------------------------------------------------------------- */ +/* switch port netdevice ops */ + +static int _ethsw_port_carrier_state_sync(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpsw_link_state state; + int err; + + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, &state); + if (unlikely(err)) { + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); + return err; + } + + WARN_ONCE(state.up > 1, "Garbage read into link_state"); + + if (state.up) + netif_carrier_on(port_priv->netdev); + else + netif_carrier_off(port_priv->netdev); + + return 0; +} + +static int ethsw_port_open(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index); + if (err) { + netdev_err(netdev, "dpsw_if_enable err %d\n", err); + return err; + } + + /* sync carrier state */ + err = _ethsw_port_carrier_state_sync(netdev); + if (err) { + netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n", + err); + goto err_carrier_sync; + } + + return 0; + +err_carrier_sync: + dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index); + return err; +} + +static int ethsw_port_stop(struct net_device *netdev) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index); + if (err) { + netdev_err(netdev, "dpsw_if_disable err %d\n", err); + return err; + } + + return 0; +} + +static int ethsw_port_fdb_add_uc(struct net_device *netdev, + const unsigned char *addr) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpsw_fdb_unicast_cfg entry
= {0}; + int err; + + entry.if_egress = port_priv->port_index; + entry.type = DPSW_FDB_ENTRY_STATIC; + ether_addr_copy(entry.mac_addr, addr); + + err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + 0, &entry); + if (err) + netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err); + return err; +} + +static int ethsw_port_fdb_del_uc(struct net_device *netdev, + const unsigned char *addr) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpsw_fdb_unicast_cfg entry = {0}; + int err; + + entry.if_egress = port_priv->port_index; + entry.type = DPSW_FDB_ENTRY_STATIC; + ether_addr_copy(entry.mac_addr, addr); + + err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + 0, &entry); + if (err) + netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err); + return err; +} + +static int ethsw_port_fdb_add_mc(struct net_device *netdev, + const unsigned char *addr) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpsw_fdb_multicast_cfg entry = {0}; + int err; + + ether_addr_copy(entry.mac_addr, addr); + entry.type = DPSW_FDB_ENTRY_STATIC; + entry.num_ifs = 1; + entry.if_id[0] = port_priv->port_index; + + err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + 0, &entry); + if (err) + netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err); + return err; +} + +static int ethsw_port_fdb_del_mc(struct net_device *netdev, + const unsigned char *addr) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpsw_fdb_multicast_cfg entry = {0}; + int err; + + ether_addr_copy(entry.mac_addr, addr); + entry.type = DPSW_FDB_ENTRY_STATIC; + entry.num_ifs = 1; + entry.if_id[0] = port_priv->port_index; + + err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + 0, &entry); + if (err) + netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err); + return err; +} + +static int _lookup_address(struct net_device *netdev, int is_uc, + const unsigned char *addr) +{ + struct netdev_hw_addr *ha; + struct netdev_hw_addr_list *list = (is_uc) ? 
&netdev->uc : &netdev->mc; + + netif_addr_lock_bh(netdev); + list_for_each_entry(ha, &list->list, list) { + if (ether_addr_equal(ha->addr, addr)) { + netif_addr_unlock_bh(netdev); + return 1; + } + } + netif_addr_unlock_bh(netdev); + return 0; +} + +static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *netdev, + const unsigned char *addr, u16 vid, + u16 flags) +{ + struct list_head *pos; + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_dev_priv *priv = port_priv->ethsw_priv; + int err; + + /* TODO: add replace support when added to iproute bridge */ + if (!(flags & NLM_F_REQUEST)) { + netdev_err(netdev, + "ethsw_port_fdb_add unexpected flags value %08x\n", + flags); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr)) { + /* if entry cannot be replaced, return error if exists */ + if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) { + list_for_each(pos, &priv->port_list) { + port_priv = list_entry(pos, + struct ethsw_port_priv, + list); + if (_lookup_address(port_priv->netdev, + 1, addr)) + return -EEXIST; + } + } + + err = ethsw_port_fdb_add_uc(netdev, addr); + if (err) { + netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n", + err); + return err; + } + + /* we might have replaced an existing entry for a different + * switch port, make sure the address doesn't linger in any + * port address list + */ + list_for_each(pos, &priv->port_list) { + port_priv = list_entry(pos, struct ethsw_port_priv, + list); + dev_uc_del(port_priv->netdev, addr); + } + + err = dev_uc_add(netdev, addr); + if (err) { + netdev_err(netdev, "dev_uc_add err %d\n", err); + return err; + } + } else { + struct dpsw_fdb_multicast_cfg entry = { + .type = DPSW_FDB_ENTRY_STATIC, + .num_ifs = 0, + }; + + /* check if address is already set on this port */ + if (_lookup_address(netdev, 0, addr)) + return -EEXIST; + + /* check if the address exists on other port */ + ether_addr_copy(entry.mac_addr, addr); + err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle, + 0, &entry); + if (!err) { + /* entry exists, can we replace it? 
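+ * NLM_F_EXCL means the caller does not allow an existing + * entry to be replaced, hence -EEXIST below.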
*/ + if (flags & NLM_F_EXCL) + return -EEXIST; + } else if (err != -ENAVAIL) { + netdev_err(netdev, "dpsw_fdb_get_multicast err %d\n", + err); + return err; + } + + err = ethsw_port_fdb_add_mc(netdev, addr); + if (err) { + netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n", + err); + return err; + } + + err = dev_mc_add(netdev, addr); + if (err) { + netdev_err(netdev, "dev_mc_add err %d\n", err); + return err; + } + } + + return 0; +} + +static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *netdev, + const unsigned char *addr, u16 vid) +{ + int err; + + if (is_unicast_ether_addr(addr)) { + err = ethsw_port_fdb_del_uc(netdev, addr); + if (err) { + netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n", + err); + return err; + } + + /* also delete if configured on port */ + err = dev_uc_del(netdev, addr); + if (err && err != -ENOENT) { + netdev_err(netdev, "dev_uc_del err %d\n", err); + return err; + } + } else { + if (!_lookup_address(netdev, 0, addr)) + return -ENOENT; + + err = dev_mc_del(netdev, addr); + if (err) { + netdev_err(netdev, "dev_mc_del err %d\n", err); + return err; + } + + err = ethsw_port_fdb_del_mc(netdev, addr); + if (err) { + netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n", + err); + return err; + } + } + + return 0; +} + +static struct rtnl_link_stats64 *ethsw_port_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *storage) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + u64 tmp; + int err; + + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + DPSW_CNT_ING_FRAME, &storage->rx_packets); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + DPSW_CNT_EGR_FRAME, &storage->tx_packets); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + DPSW_CNT_ING_BYTE, &storage->rx_bytes); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + DPSW_CNT_EGR_BYTE, &storage->tx_bytes); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + DPSW_CNT_ING_FRAME_DISCARD, + &storage->rx_dropped); + if (err) + goto error; + + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + DPSW_CNT_ING_FLTR_FRAME, + &tmp); + if (err) + goto error; + storage->rx_dropped += tmp; + + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + DPSW_CNT_EGR_FRAME_DISCARD, + &storage->tx_dropped); + if (err) + goto error; + + return storage; + +error: + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); + return storage; +} + +static int ethsw_port_change_mtu(struct net_device *netdev, int mtu) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) { + netdev_err(netdev, "Invalid MTU %d.
Valid range is: %d..%d\n", + mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH); + return -EINVAL; + } + + err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io, + 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + (u16)ETHSW_L2_MAX_FRM(mtu)); + if (err) { + netdev_err(netdev, + "dpsw_if_set_max_frame_length() err %d\n", err); + return err; + } + + netdev->mtu = mtu; + return 0; +} + +static const struct net_device_ops ethsw_port_ops = { + .ndo_open = &ethsw_port_open, + .ndo_stop = &ethsw_port_stop, + + .ndo_fdb_add = &ethsw_port_fdb_add, + .ndo_fdb_del = &ethsw_port_fdb_del, + .ndo_fdb_dump = &ndo_dflt_fdb_dump, + + .ndo_get_stats64 = &ethsw_port_get_stats, + .ndo_change_mtu = &ethsw_port_change_mtu, + + .ndo_start_xmit = &ethsw_dropframe, +}; + +static void ethsw_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + u16 version_major, version_minor; + int err; + + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version)); + + err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0, + &version_major, + &version_minor); + if (err) + strlcpy(drvinfo->fw_version, "N/A", + sizeof(drvinfo->fw_version)); + else + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u", version_major, version_minor); + + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent), + sizeof(drvinfo->bus_info)); +} + +static int ethsw_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpsw_link_state state = {0}; + int err = 0; + + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + &state); + if (err) { + netdev_err(netdev, "ERROR %d getting link state", err); + goto out; + } + + /* At the moment, we have no way of interrogating the DPMAC + * from the DPSW side or there may not exist a DPMAC at all. + * Report only autoneg state, duplex and speed. + */ + if (state.options & DPSW_LINK_OPT_AUTONEG) + cmd->autoneg = AUTONEG_ENABLE; + if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX)) + cmd->duplex = DUPLEX_FULL; + ethtool_cmd_speed_set(cmd, state.rate); + +out: + return err; +} + +static int ethsw_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct dpsw_link_state state = {0}; + struct dpsw_link_cfg cfg = {0}; + int err = 0; + + netdev_dbg(netdev, "Setting link parameters..."); + + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + &state); + if (err) { + netdev_err(netdev, "ERROR %d getting link state", err); + goto out; + } + + /* Due to a temporary MC limitation, the DPSW port must be down + * in order to be able to change link settings. Taking steps to let + * the user know that.
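+ * (Bring the port down first, e.g. with 'ip link set <port> down', + * then retry; the new configuration is applied through the + * dpsw_if_set_link_cfg() call below.)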
+ */ + if (netif_running(netdev)) { + netdev_info(netdev, + "Sorry, interface must be brought down first.\n"); + return -EACCES; + } + + cfg.options = state.options; + cfg.rate = ethtool_cmd_speed(cmd); + if (cmd->autoneg == AUTONEG_ENABLE) + cfg.options |= DPSW_LINK_OPT_AUTONEG; + else + cfg.options &= ~DPSW_LINK_OPT_AUTONEG; + if (cmd->duplex == DUPLEX_HALF) + cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX; + else + cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX; + + err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + &cfg); + if (err) + /* ethtool will be loud enough if we return an error; no point + * in putting our own error message on the console by default + */ + netdev_dbg(netdev, "ERROR %d setting link cfg", err); + +out: + return err; +} + +static struct { + enum dpsw_counter id; + char name[ETH_GSTRING_LEN]; +} ethsw_ethtool_counters[] = { + {DPSW_CNT_ING_FRAME, "rx frames"}, + {DPSW_CNT_ING_BYTE, "rx bytes"}, + {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"}, + {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"}, + {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"}, + {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"}, + {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"}, + {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"}, + {DPSW_CNT_EGR_FRAME, "tx frames"}, + {DPSW_CNT_EGR_BYTE, "tx bytes"}, + {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"}, + +}; + +static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ethsw_ethtool_counters); + default: + return -EOPNOTSUPP; + } +} + +static void ethsw_ethtool_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + u32 i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) + memcpy(data + i * ETH_GSTRING_LEN, + ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN); + break; + } +} + +static void ethsw_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + u32 i; + int err; + + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) { + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0, + port_priv->ethsw_priv->dpsw_handle, + port_priv->port_index, + ethsw_ethtool_counters[i].id, + &data[i]); + if (err) + netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n", + ethsw_ethtool_counters[i].name, err); + } +} + +static const struct ethtool_ops ethsw_port_ethtool_ops = { + .get_drvinfo = ðsw_get_drvinfo, + .get_link = ðtool_op_get_link, + .get_settings = ðsw_get_settings, + .set_settings = ðsw_set_settings, + .get_strings = ðsw_ethtool_get_strings, + .get_ethtool_stats = ðsw_ethtool_get_stats, + .get_sset_count = ðsw_ethtool_get_sset_count, +}; + +/* -------------------------------------------------------------------------- */ +/* ethsw driver functions */ + +static int ethsw_links_state_update(struct ethsw_dev_priv *priv) +{ + struct list_head *pos; + struct ethsw_port_priv *port_priv; + int err; + + list_for_each(pos, &priv->port_list) { + port_priv = list_entry(pos, struct ethsw_port_priv, + list); + + err = _ethsw_port_carrier_state_sync(port_priv->netdev); + if (err) + netdev_err(port_priv->netdev, + "_ethsw_port_carrier_state_sync err %d\n", + err); + } + + return 0; +} + +static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg) +{ + return IRQ_WAKE_THREAD; +} + +static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg) +{ + 
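+	/* The hard IRQ handler above does no work and just returns
+	 * IRQ_WAKE_THREAD: reading and clearing the DPSW IRQ status means
+	 * sending commands to the MC over the portal, which can sleep, so
+	 * it has to happen here, in the threaded handler, rather than in
+	 * hard IRQ context.
+	 */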
struct device *dev = (struct device *)arg; + struct net_device *netdev = dev_get_drvdata(dev); + struct ethsw_dev_priv *priv = netdev_priv(netdev); + + struct fsl_mc_io *io = priv->mc_io; + u16 token = priv->dpsw_handle; + int irq_index = DPSW_IRQ_INDEX_IF; + + /* Mask the events and the if_id reserved bits to be cleared on read */ + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; + int err; + + err = dpsw_get_irq_status(io, 0, token, irq_index, &status); + if (unlikely(err)) { + netdev_err(netdev, "Can't get irq status (err %d)", err); + + err = dpsw_clear_irq_status(io, 0, token, irq_index, + 0xFFFFFFFF); + if (unlikely(err)) + netdev_err(netdev, "Can't clear irq status (err %d)", + err); + goto out; + } + + if (status & DPSW_IRQ_EVENT_LINK_CHANGED) { + err = ethsw_links_state_update(priv); + if (unlikely(err)) + goto out; + } + +out: + return IRQ_HANDLED; +} + +static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct net_device *netdev = dev_get_drvdata(dev); + struct ethsw_dev_priv *priv = netdev_priv(netdev); + int err = 0; + struct fsl_mc_device_irq *irq; + const int irq_index = DPSW_IRQ_INDEX_IF; + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED; + + err = fsl_mc_allocate_irqs(sw_dev); + if (unlikely(err)) { + dev_err(dev, "MC irqs allocation failed\n"); + return err; + } + + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) { + err = -EINVAL; + goto free_irq; + } + + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle, + irq_index, 0); + if (unlikely(err)) { + dev_err(dev, "dpsw_set_irq_enable err %d\n", err); + goto free_irq; + } + + irq = sw_dev->irqs[irq_index]; + + err = devm_request_threaded_irq(dev, irq->msi_desc->irq, + ethsw_irq0_handler, + _ethsw_irq0_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(dev), dev); + if (unlikely(err)) { + dev_err(dev, "devm_request_threaded_irq(): %d", err); + goto free_irq; + } + + err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle, + irq_index, mask); + if (unlikely(err)) { + dev_err(dev, "dpsw_set_irq_mask(): %d", err); + goto free_devm_irq; + } + + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle, + irq_index, 1); + if (unlikely(err)) { + dev_err(dev, "dpsw_set_irq_enable(): %d", err); + goto free_devm_irq; + } + + return 0; + +free_devm_irq: + devm_free_irq(dev, irq->msi_desc->irq, dev); +free_irq: + fsl_mc_free_irqs(sw_dev); + return err; +} + +static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct net_device *netdev = dev_get_drvdata(dev); + struct ethsw_dev_priv *priv = netdev_priv(netdev); + + dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle, + DPSW_IRQ_INDEX_IF, 0); + devm_free_irq(dev, + sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq, + dev); + fsl_mc_free_irqs(sw_dev); +} + +static int __cold +ethsw_init(struct fsl_mc_device *sw_dev) +{ + struct device *dev = &sw_dev->dev; + struct ethsw_dev_priv *priv; + struct net_device *netdev; + int err = 0; + u16 i; + u16 version_major, version_minor; + const struct dpsw_stp_cfg stp_cfg = { + .vlan_id = 1, + .state = DPSW_STP_STATE_FORWARDING, + }; + + netdev = dev_get_drvdata(dev); + priv = netdev_priv(netdev); + + priv->dev_id = sw_dev->obj_desc.id; + + err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle); + if (err) { + dev_err(dev, "dpsw_open err %d\n", err); + goto err_exit; + } + if (!priv->dpsw_handle) { + dev_err(dev, "dpsw_open returned null handle but no error\n"); + err = -EFAULT; + goto err_exit; + } + + err = 
dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
+				  &priv->sw_attr);
+	if (err) {
+		dev_err(dev, "dpsw_get_attributes err %d\n", err);
+		goto err_close;
+	}
+
+	err = dpsw_get_api_version(priv->mc_io, 0,
+				   &version_major,
+				   &version_minor);
+	if (err) {
+		dev_err(dev, "dpsw_get_api_version err %d\n", err);
+		goto err_close;
+	}
+
+	/* Minimum supported DPSW version check */
+	if (version_major < DPSW_MIN_VER_MAJOR ||
+	    (version_major == DPSW_MIN_VER_MAJOR &&
+	     version_minor < DPSW_MIN_VER_MINOR)) {
+		dev_err(dev, "DPSW version %d.%d not supported. Use %d.%d or greater.\n",
+			version_major, version_minor,
+			DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
+		err = -ENOTSUPP;
+		goto err_close;
+	}
+
+	err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
+	if (err) {
+		dev_err(dev, "dpsw_reset err %d\n", err);
+		goto err_close;
+	}
+
+	err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
+					 DPSW_FDB_LEARNING_MODE_HW);
+	if (err) {
+		dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
+		goto err_close;
+	}
+
+	for (i = 0; i < priv->sw_attr.num_ifs; i++) {
+		err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
+				      &stp_cfg);
+		if (err) {
+			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
+				err, i);
+			goto err_close;
+		}
+
+		err = dpsw_if_set_broadcast(priv->mc_io, 0,
+					    priv->dpsw_handle, i, 1);
+		if (err) {
+			dev_err(dev,
+				"dpsw_if_set_broadcast err %d for port %d\n",
+				err, i);
+			goto err_close;
+		}
+	}
+
+	return 0;
+
+err_close:
+	dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
+err_exit:
+	return err;
+}
+
+static int __cold
+ethsw_takedown(struct fsl_mc_device *sw_dev)
+{
+	struct device *dev = &sw_dev->dev;
+	struct net_device *netdev;
+	struct ethsw_dev_priv *priv;
+	int err;
+
+	netdev = dev_get_drvdata(dev);
+	priv = netdev_priv(netdev);
+
+	err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
+	if (err)
+		dev_warn(dev, "dpsw_close err %d\n", err);
+
+	return 0;
+}
+
+static int __cold
+ethsw_remove(struct fsl_mc_device *sw_dev)
+{
+	struct device *dev;
+	struct net_device *netdev;
+	struct ethsw_dev_priv *priv;
+	struct ethsw_port_priv *port_priv;
+	struct list_head *pos, *tmp;
+
+	dev = &sw_dev->dev;
+	netdev = dev_get_drvdata(dev);
+	priv = netdev_priv(netdev);
+
+	/* use the safe iterator: each port_priv lives inside the port
+	 * netdev being freed here
+	 */
+	list_for_each_safe(pos, tmp, &priv->port_list) {
+		port_priv = list_entry(pos, struct ethsw_port_priv, list);
+
+		rtnl_lock();
+		netdev_upper_dev_unlink(port_priv->netdev, netdev);
+		rtnl_unlock();
+
+		unregister_netdev(port_priv->netdev);
+		free_netdev(port_priv->netdev);
+	}
+
+	ethsw_teardown_irqs(sw_dev);
+
+	unregister_netdev(netdev);
+
+	ethsw_takedown(sw_dev);
+	fsl_mc_portal_free(priv->mc_io);
+
+	dev_set_drvdata(dev, NULL);
+	free_netdev(netdev);
+
+	return 0;
+}
+
+static int __cold
+ethsw_probe(struct fsl_mc_device *sw_dev)
+{
+	struct device *dev;
+	struct net_device *netdev = NULL;
+	struct ethsw_dev_priv *priv = NULL;
+	int err = 0;
+	u16 i;
+	const char def_mcast[ETH_ALEN] = {
+		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
+	};
+	char port_name[IFNAMSIZ];
+
+	dev = &sw_dev->dev;
+
+	/* register switch device, it's for management only - no I/O */
+	netdev = alloc_etherdev(sizeof(*priv));
+	if (!netdev) {
+		dev_err(dev, "alloc_etherdev error\n");
+		return -ENOMEM;
+	}
+	netdev->netdev_ops = &ethsw_ops;
+
+	SET_NETDEV_DEV(netdev, dev);
+	dev_set_drvdata(dev, netdev);
+
+	priv = netdev_priv(netdev);
+	priv->netdev = netdev;
+
+	err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
+	if (err) {
+		dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+		goto err_free_netdev;
+	}
+	if (!priv->mc_io) {
+		dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
+		err = -EFAULT;
+		goto err_free_netdev;
+	}
+
+	err = ethsw_init(sw_dev);
+	if (err) {
+		dev_err(dev, "switch init err %d\n", err);
+		goto err_free_cmdport;
+	}
+
+	netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
+
+	/* TODO: should we hold rtnl_lock here? We can't register_netdev under
+	 * lock
+	 */
+	dev_alloc_name(netdev, "sw%d");
+	err = register_netdev(netdev);
+	if (err < 0) {
+		dev_err(dev, "register_netdev error %d\n", err);
+		goto err_takedown;
+	}
+	if (err)
+		dev_info(dev, "register_netdev res %d\n", err);
+
+	/* VLAN 1 is implicitly configured on the switch */
+	priv->vlans[1] = ETHSW_VLAN_MEMBER;
+	/* Flooding and learning are implicitly enabled */
+	priv->learning = true;
+	priv->flood = true;
+
+	/* register switch ports */
+	snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
+
+	INIT_LIST_HEAD(&priv->port_list);
+	for (i = 0; i < priv->sw_attr.num_ifs; i++) {
+		struct net_device *port_netdev;
+		struct ethsw_port_priv *port_priv;
+
+		port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
+		if (!port_netdev) {
+			dev_err(dev, "alloc_etherdev error\n");
+			err = -ENOMEM;
+			goto err_takedown;
+		}
+
+		port_priv = netdev_priv(port_netdev);
+		port_priv->netdev = port_netdev;
+		port_priv->ethsw_priv = priv;
+
+		port_priv->port_index = i;
+		port_priv->stp_state = BR_STATE_FORWARDING;
+		/* VLAN 1 is configured by default on all switch ports */
+		port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
+				      ETHSW_VLAN_PVID;
+
+		SET_NETDEV_DEV(port_netdev, dev);
+		port_netdev->netdev_ops = &ethsw_port_ops;
+		port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
+
+		port_netdev->flags = port_netdev->flags |
+				     IFF_PROMISC | IFF_SLAVE;
+
+		dev_alloc_name(port_netdev, port_name);
+		err = register_netdev(port_netdev);
+		if (err < 0) {
+			dev_err(dev, "register_netdev error %d\n", err);
+			free_netdev(port_netdev);
+			goto err_takedown;
+		}
+
+		rtnl_lock();
+
+		err = netdev_master_upper_dev_link(port_netdev, netdev,
+						   NULL, NULL);
+		if (err) {
+			dev_err(dev, "netdev_master_upper_dev_link error %d\n",
+				err);
+			unregister_netdev(port_netdev);
+			free_netdev(port_netdev);
+			rtnl_unlock();
+			goto err_takedown;
+		}
+
+		rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
+
+		rtnl_unlock();
+
+		list_add(&port_priv->list, &priv->port_list);
+
+		/* TODO: implement set_rm_mode instead of this */
+		err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
+		if (err)
+			dev_warn(&netdev->dev,
+				 "ethsw_port_fdb_add_mc err %d\n", err);
+	}
+
+	/* the switch starts up enabled */
+	rtnl_lock();
+	err = dev_open(netdev);
+	rtnl_unlock();
+	if (err)
+		dev_warn(dev, "dev_open err %d\n", err);
+
+	/* setup irqs */
+	err = ethsw_setup_irqs(sw_dev);
+	if (unlikely(err)) {
+		dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
+		goto err_takedown;
+	}
+
+	dev_info(&netdev->dev,
+		 "probed %d port switch\n", priv->sw_attr.num_ifs);
+	return 0;
+
+err_takedown:
+	/* ethsw_remove() performs the full cleanup, including freeing the
+	 * MC portal and the netdev; don't fall through and free them twice
+	 */
+	ethsw_remove(sw_dev);
+	return err;
+
+err_free_cmdport:
+	fsl_mc_portal_free(priv->mc_io);
+err_free_netdev:
+	dev_set_drvdata(dev, NULL);
+	free_netdev(netdev);
+
+	return err;
+}
+
+static const struct fsl_mc_device_id ethsw_match_id_table[] = {
+	{
+		.vendor = FSL_MC_VENDOR_FREESCALE,
+		.obj_type = "dpsw",
+	},
+	{}
+};
+
+static struct fsl_mc_driver eth_sw_drv = {
+	.driver = {
+		.name		= KBUILD_MODNAME,
+		.owner		= THIS_MODULE,
+	},
+	.probe = ethsw_probe,
+	.remove = ethsw_remove,
+	.match_id_table = ethsw_match_id_table,
+};
+
+module_fsl_mc_driver(eth_sw_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)"); --- /dev/null +++ b/drivers/staging/fsl-dpaa2/evb/Kconfig @@ -0,0 +1,7 @@ +config FSL_DPAA2_EVB + tristate "DPAA2 Edge Virtual Bridge" + depends on FSL_MC_BUS && FSL_DPAA2 + select VLAN_8021Q + default y + ---help--- + Prototype driver for DPAA2 Edge Virtual Bridge. --- /dev/null +++ b/drivers/staging/fsl-dpaa2/evb/Makefile @@ -0,0 +1,10 @@ + +obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o + +dpaa2-evb-objs := evb.o dpdmux.o + +all: + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules + +clean: + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean --- /dev/null +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h @@ -0,0 +1,279 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef _FSL_DPDMUX_CMD_H +#define _FSL_DPDMUX_CMD_H + +/* DPDMUX Version */ +#define DPDMUX_VER_MAJOR 6 +#define DPDMUX_VER_MINOR 1 + +#define DPDMUX_CMD_BASE_VER 1 +#define DPDMUX_CMD_ID_OFFSET 4 + +#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER) + +/* Command IDs */ +#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800) +#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806) +#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906) +#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986) +#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06) + +#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002) +#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003) +#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004) +#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005) +#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006) + +#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012) +#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013) +#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014) +#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015) +#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016) +#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017) + +#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1) + +#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3) + +#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7) +#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8) +#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9) +#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa) + +#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0) +#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1) +#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2) +#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3) +#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4) + +#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5) +#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6) +#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7) + +#define DPDMUX_MASK(field) \ + GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \ + DPDMUX_##field##_SHIFT) +#define dpdmux_set_field(var, field, val) \ + ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field))) +#define dpdmux_get_field(var, field) \ + (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT) + +struct dpdmux_cmd_open { + u32 dpdmux_id; +}; + +struct dpdmux_cmd_create { + u8 method; + u8 manip; + u16 num_ifs; + u32 pad; + + u16 adv_max_dmat_entries; + u16 adv_max_mc_groups; + u16 adv_max_vlan_ids; + u16 pad1; + + u64 options; +}; + +struct dpdmux_cmd_destroy { + u32 dpdmux_id; +}; + +#define DPDMUX_ENABLE_SHIFT 0 +#define DPDMUX_ENABLE_SIZE 1 + +struct dpdmux_rsp_is_enabled { + u8 en; +}; + +struct dpdmux_cmd_set_irq_enable { + u8 enable; + u8 pad[3]; + u8 irq_index; +}; + +struct dpdmux_cmd_get_irq_enable { + u32 pad; + u8 irq_index; +}; + +struct dpdmux_rsp_get_irq_enable { + u8 enable; +}; + +struct dpdmux_cmd_set_irq_mask { + u32 mask; + u8 irq_index; +}; + +struct dpdmux_cmd_get_irq_mask { + u32 pad; + u8 irq_index; +}; + +struct dpdmux_rsp_get_irq_mask { + u32 mask; +}; + +struct dpdmux_cmd_get_irq_status { + u32 status; + u8 irq_index; +}; + +struct dpdmux_rsp_get_irq_status { + u32 status; +}; + +struct dpdmux_cmd_clear_irq_status { + u32 status; + u8 irq_index; +}; + +struct dpdmux_rsp_get_attr { + u8 method; + u8 manip; + u16 num_ifs; + u16 mem_size; + u16 pad; + + u64 pad1; + + u32 id; + u32 pad2; + + u64 options; +}; + +struct dpdmux_cmd_set_max_frame_length { + u16 max_frame_length; +}; + +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0 +#define 
DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4 + +struct dpdmux_cmd_if_set_accepted_frames { + u16 if_id; + u8 frames_options; +}; + +struct dpdmux_cmd_if { + u16 if_id; +}; + +struct dpdmux_rsp_if_get_attr { + u8 pad[3]; + u8 enabled; + u8 pad1[3]; + u8 accepted_frames_type; + u32 rate; +}; + +struct dpdmux_cmd_if_l2_rule { + u16 if_id; + u8 mac_addr5; + u8 mac_addr4; + u8 mac_addr3; + u8 mac_addr2; + u8 mac_addr1; + u8 mac_addr0; + + u32 pad; + u16 vlan_id; +}; + +struct dpdmux_cmd_if_get_counter { + u16 if_id; + u8 counter_type; +}; + +struct dpdmux_rsp_if_get_counter { + u64 pad; + u64 counter; +}; + +struct dpdmux_cmd_if_set_link_cfg { + u16 if_id; + u16 pad[3]; + + u32 rate; + u32 pad1; + + u64 options; +}; + +struct dpdmux_cmd_if_get_link_state { + u16 if_id; +}; + +struct dpdmux_rsp_if_get_link_state { + u32 pad; + u8 up; + u8 pad1[3]; + + u32 rate; + u32 pad2; + + u64 options; +}; + +struct dpdmux_rsp_get_api_version { + u16 major; + u16 minor; +}; + +struct dpdmux_set_custom_key { + u64 pad[6]; + u64 key_cfg_iova; +}; + +struct dpdmux_cmd_add_custom_cls_entry { + u8 pad[3]; + u8 key_size; + u16 pad1; + u16 dest_if; + u64 key_iova; + u64 mask_iova; +}; + +struct dpdmux_cmd_remove_custom_cls_entry { + u8 pad[3]; + u8 key_size; + u32 pad1; + u64 key_iova; + u64 mask_iova; +}; + +#endif /* _FSL_DPDMUX_CMD_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c @@ -0,0 +1,1112 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "../../fsl-mc/include/mc-sys.h" +#include "../../fsl-mc/include/mc-cmd.h" +#include "dpdmux.h" +#include "dpdmux-cmd.h" + +/** + * dpdmux_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpdmux_id: DPDMUX unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpdmux_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpdmux_id, + u16 *token) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_open *cmd_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpdmux_cmd_open *)cmd.params; + cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpdmux_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_create() - Create the DPDMUX object + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: returned object id + * + * Create the DPDMUX object, allocate required resources and + * perform required initialization. + * + * The object can be created either by declaring it in the + * DPL file, or by calling this function. + * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. 
+ */
+int dpdmux_create(struct fsl_mc_io *mc_io,
+		  u16 dprc_token,
+		  u32 cmd_flags,
+		  const struct dpdmux_cfg *cfg,
+		  u32 *obj_id)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_create *cmd_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
+					  cmd_flags,
+					  dprc_token);
+	cmd_params = (struct dpdmux_cmd_create *)cmd.params;
+	cmd_params->method = cfg->method;
+	cmd_params->manip = cfg->manip;
+	cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+	cmd_params->adv_max_dmat_entries =
+		cpu_to_le16(cfg->adv.max_dmat_entries);
+	cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
+	cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
+	cmd_params->options = cpu_to_le64(cfg->adv.options);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	*obj_id = mc_cmd_read_object_id(&cmd);
+
+	return 0;
+}
+
+/**
+ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ *	created this object
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpdmux_destroy(struct fsl_mc_io *mc_io,
+		   u16 dprc_token,
+		   u32 cmd_flags,
+		   u32 object_id)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_destroy *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
+					  cmd_flags,
+					  dprc_token);
+	cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
+	cmd_params->dpdmux_id = cpu_to_le32(object_id);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_enable() - Enable DPDMUX functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_enable(struct fsl_mc_io *mc_io,
+		  u32 cmd_flags,
+		  u16 token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_disable() - Disable DPDMUX functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_disable(struct fsl_mc_io *mc_io,
+		   u32 cmd_flags,
+		   u16 token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      int *en)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_rsp_is_enabled *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
+	*en = dpdmux_get_field(rsp_params->en, ENABLE);
+
+	return 0;
+}
+
+/**
+ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_reset(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, none of the causes
+ * will raise an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u8 irq_index,
+			  u8 en)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_set_irq_enable *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params;
+	cmd_params->enable = en;
+	cmd_params->irq_index = irq_index;
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_get_irq_enable() - Get overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
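+ *
+ * A typical arming sequence pairs the cause mask with the overall enable
+ * (illustrative sketch; 'token' is an open DPDMUX handle):
+ *
+ *	dpdmux_set_irq_mask(mc_io, 0, token, DPDMUX_IRQ_INDEX_IF,
+ *			    DPDMUX_IRQ_EVENT_LINK_CHANGED);
+ *	dpdmux_set_irq_enable(mc_io, 0, token, DPDMUX_IRQ_INDEX_IF, 1);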
+ */ +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_get_irq_enable *cmd_params; + struct dpdmux_rsp_get_irq_enable *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params; + *en = rsp_params->enable; + + return 0; +} + +/** + * dpdmux_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @irq_index: The interrupt index to configure + * @mask: event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_set_irq_mask *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_get_irq_mask() - Get interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @irq_index: The interrupt index to configure + * @mask: Returned event mask to trigger interrupt + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_get_irq_mask *cmd_params; + struct dpdmux_rsp_get_irq_mask *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params; + *mask = le32_to_cpu(rsp_params->mask); + + return 0; +} + +/** + * dpdmux_get_irq_status() - Get the current status of any pending interrupts. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. 
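+ *
+ * The value passed in @status is also written into the command, so callers
+ * should zero-initialize it. A sketch of the read/handle/clear (W1C) cycle,
+ * where handle_link_change() stands in for the caller's own logic:
+ *
+ *	u32 status = 0;
+ *
+ *	dpdmux_get_irq_status(mc_io, 0, token, DPDMUX_IRQ_INDEX_IF, &status);
+ *	if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED)
+ *		handle_link_change();
+ *	dpdmux_clear_irq_status(mc_io, 0, token, DPDMUX_IRQ_INDEX_IF,
+ *				status);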
+ */ +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_get_irq_status *cmd_params; + struct dpdmux_rsp_get_irq_status *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(*status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params; + *status = le32_to_cpu(rsp_params->status); + + return 0; +} + +/** + * dpdmux_clear_irq_status() - Clear a pending interrupt's status + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @irq_index: The interrupt index to configure + * @status: bits to clear (W1C) - one bit per cause: + * 0 = don't change + * 1 = clear status bit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_clear_irq_status *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_get_attributes() - Retrieve DPDMUX attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @attr: Returned object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpdmux_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_rsp_get_attr *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params; + attr->id = le32_to_cpu(rsp_params->id); + attr->options = le64_to_cpu(rsp_params->options); + attr->method = rsp_params->method; + attr->manip = rsp_params->manip; + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs); + attr->mem_size = le16_to_cpu(rsp_params->mem_size); + + return 0; +} + +/** + * dpdmux_if_enable() - Enable Interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Identifier + * + * Return: Completion status. '0' on Success; Error code otherwise. 
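+ *
+ * Per the interface numbering used below (0 is the uplink, 1..num_ifs are
+ * the downlinks), a whole demux can be brought up with (illustrative):
+ *
+ *	for (i = 0; i <= num_ifs; i++)
+ *		dpdmux_if_enable(mc_io, 0, token, i);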
+ */ +int dpdmux_if_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id) +{ + struct dpdmux_cmd_if *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_disable() - Disable Interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Identifier + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpdmux_if_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id) +{ + struct dpdmux_cmd_if *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @max_frame_length: The required maximum frame length + * + * Update the maximum frame length on all DMUX interfaces. + * In case of VEPA, the maximum frame length on all dmux interfaces + * will be updated with the minimum value of the mfls of the connected + * dpnis and the actual value of dmux mfl. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_set_max_frame_length *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params; + cmd_params->max_frame_length = cpu_to_le16(max_frame_length); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_ul_reset_counters() - Function resets the uplink counter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_set_accepted_frames() - Set the accepted frame types + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface ID (0 for uplink, or 1-num_ifs); + * @cfg: Frame types configuration + * + * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or + * priority-tagged frames are discarded. + * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or + * priority-tagged frames are accepted. 
+ * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN-tagged,
+ * untagged and priority-tagged frames are accepted.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
+				  u32 cmd_flags,
+				  u16 token,
+				  u16 if_id,
+				  const struct dpdmux_accepted_frames *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE,
+			 cfg->type);
+	dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION,
+			 cfg->unaccept_act);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs)
+ * @attr: Interface attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     u16 if_id,
+			     struct dpdmux_if_attr *attr)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_if *cmd_params;
+	struct dpdmux_rsp_if_get_attr *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_cmd_if *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
+	attr->rate = le32_to_cpu(rsp_params->rate);
+	attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
+	attr->accept_frame_type =
+		dpdmux_get_field(rsp_params->accepted_frames_type,
+				 ACCEPTED_FRAMES_TYPE);
+
+	return 0;
+}
+
+/**
+ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Destination interface ID
+ * @rule: L2 rule
+ *
+ * The function removes an L2 rule from the DPDMUX table, or removes an
+ * interface from an existing multicast address entry.
+ *
+ * Return: '0' on Success; Error code otherwise.
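+ *
+ * Example rule (illustrative; the address and VLAN are arbitrary):
+ *
+ *	struct dpdmux_l2_rule rule = {
+ *		.mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
+ *		.vlan_id = 1,
+ *	};
+ *
+ *	err = dpdmux_if_remove_l2_rule(mc_io, 0, token, 1, &rule);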
+ */
+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     u16 if_id,
+			     const struct dpdmux_l2_rule *rule)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
+	cmd_params->mac_addr5 = rule->mac_addr[5];
+	cmd_params->mac_addr4 = rule->mac_addr[4];
+	cmd_params->mac_addr3 = rule->mac_addr[3];
+	cmd_params->mac_addr2 = rule->mac_addr[2];
+	cmd_params->mac_addr1 = rule->mac_addr[1];
+	cmd_params->mac_addr0 = rule->mac_addr[0];
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Destination interface ID
+ * @rule: L2 rule
+ *
+ * The function adds an L2 rule into the DPDMUX table, or adds an interface
+ * to an existing multicast address entry.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u16 if_id,
+			  const struct dpdmux_l2_rule *rule)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
+	cmd_params->mac_addr5 = rule->mac_addr[5];
+	cmd_params->mac_addr4 = rule->mac_addr[4];
+	cmd_params->mac_addr3 = rule->mac_addr[3];
+	cmd_params->mac_addr2 = rule->mac_addr[2];
+	cmd_params->mac_addr1 = rule->mac_addr[1];
+	cmd_params->mac_addr0 = rule->mac_addr[0];
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_if_get_counter() - Obtain a specific counter of an interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Interface Id
+ * @counter_type: counter type
+ * @counter: Returned specific counter information
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u16 if_id,
+			  enum dpdmux_counter_type counter_type,
+			  u64 *counter)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_if_get_counter *cmd_params;
+	struct dpdmux_rsp_if_get_counter *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	cmd_params->counter_type = counter_type;
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
+	*counter = le64_to_cpu(rsp_params->counter);
+
+	return 0;
+}
+
+/**
+ * dpdmux_if_set_link_cfg() - Set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: interface id
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u16 if_id,
+			   struct dpdmux_link_cfg *cfg)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_if_set_link_cfg *cmd_params;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+	cmd_params->rate = cpu_to_le32(cfg->rate);
+	cmd_params->options = cpu_to_le64(cfg->options);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_if_get_link_state() - Return the link state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: interface id
+ * @state: link state
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
+			     u32 cmd_flags,
+			     u16 token,
+			     u16 if_id,
+			     struct dpdmux_link_state *state)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_cmd_if_get_link_state *cmd_params;
+	struct dpdmux_rsp_if_get_link_state *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
+	cmd_params->if_id = cpu_to_le16(if_id);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
+	state->rate = le32_to_cpu(rsp_params->rate);
+	state->options = le64_to_cpu(rsp_params->options);
+	state->up = dpdmux_get_field(rsp_params->up, ENABLE);
+
+	return 0;
+}
+
+/**
+ * dpdmux_set_custom_key() - Set a custom classification key.
+ *
+ * This API is only available for DPDMUX instances created with
+ * DPDMUX_METHOD_CUSTOM. It must be called before populating the
+ * classification table using dpdmux_add_custom_cls_entry().
+ *
+ * Calls to dpdmux_set_custom_key() remove all existing classification
+ * entries that may have been added previously using
+ * dpdmux_add_custom_cls_entry().
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @key_cfg_iova: DMA address of a configuration structure set up using
+ *	dpkg_prepare_key_cfg(). Maximum key size is 24 bytes.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u64 key_cfg_iova)
+{
+	struct dpdmux_set_custom_key *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_set_custom_key *)cmd.params;
+	cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_add_custom_cls_entry() - Add a custom classification entry.
+ *
+ * This API is only available for DPDMUX instances created with
+ * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key
+ * composition rule must be set up using dpdmux_set_custom_key().
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @rule: Classification rule to insert. Rules cannot be duplicated; if a
+ *	matching rule already exists, the action will be replaced.
+ * @action: Action to perform for matching traffic.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
+				u32 cmd_flags,
+				u16 token,
+				struct dpdmux_rule_cfg *rule,
+				struct dpdmux_cls_action *action)
+{
+	struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
+					  cmd_flags,
+					  token);
+
+	cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params;
+	cmd_params->key_size = rule->key_size;
+	cmd_params->dest_if = cpu_to_le16(action->dest_if);
+	cmd_params->key_iova = cpu_to_le64(rule->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_remove_custom_cls_entry() - Remove a custom classification entry.
+ *
+ * This API is only available for DPDMUX instances created with
+ * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification
+ * entries previously inserted using dpdmux_add_custom_cls_entry().
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @rule: Classification rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
+				   u32 cmd_flags,
+				   u16 token,
+				   struct dpdmux_rule_cfg *rule)
+{
+	struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
+	cmd_params->key_size = rule->key_size;
+	cmd_params->key_iova = cpu_to_le64(rule->key_iova);
+	cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_get_api_version() - Get Data Path Demux API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path demux API
+ * @minor_ver: Minor version of data path demux API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 *major_ver,
+			   u16 *minor_ver)
+{
+	struct mc_command cmd = { 0 };
+	struct dpdmux_rsp_get_api_version *rsp_params;
+	int err;
+
+	cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
+					  cmd_flags,
+					  0);
+
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
+	*major_ver = le16_to_cpu(rsp_params->major);
+	*minor_ver = le16_to_cpu(rsp_params->minor);
+
+	return 0;
+}
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
@@ -0,0 +1,453 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPDMUX_H
+#define __FSL_DPDMUX_H
+
+struct fsl_mc_io;
+
+/* Data Path Demux API
+ * Contains API for handling DPDMUX topology and functionality
+ */
+
+int dpdmux_open(struct fsl_mc_io *mc_io,
+		u32 cmd_flags,
+		int dpdmux_id,
+		u16 *token);
+
+int dpdmux_close(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token);
+
+/**
+ * DPDMUX general options
+ */
+
+/**
+ * Enable bridging between internal interfaces
+ */
+#define DPDMUX_OPT_BRIDGE_EN		0x0000000000000002ULL
+
+/**
+ * Mask support for classification
+ */
+#define DPDMUX_OPT_CLS_MASK_SUPPORT	0x0000000000000020ULL
+
+#define DPDMUX_IRQ_INDEX_IF		0x0000
+#define DPDMUX_IRQ_INDEX		0x0001
+
+/**
+ * IRQ event - Indicates that the link state changed
+ */
+#define DPDMUX_IRQ_EVENT_LINK_CHANGED	0x0001
+
+/**
+ * enum dpdmux_manip - DPDMUX manipulation operations
+ * @DPDMUX_MANIP_NONE: No manipulation on frames
+ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
+ */
+enum dpdmux_manip {
+	DPDMUX_MANIP_NONE = 0x0,
+	DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
+};
+
+/**
+ * enum dpdmux_method - DPDMUX method options
+ * @DPDMUX_METHOD_NONE: no DPDMUX method
+ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
+ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
+ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
+ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
+ * @DPDMUX_METHOD_CUSTOM: DPDMUX based on a custom classification key
+ */
+enum dpdmux_method {
+	DPDMUX_METHOD_NONE = 0x0,
+	DPDMUX_METHOD_C_VLAN_MAC = 0x1,
+	DPDMUX_METHOD_MAC = 0x2,
+	DPDMUX_METHOD_C_VLAN = 0x3,
+	DPDMUX_METHOD_S_VLAN = 0x4,
+	DPDMUX_METHOD_CUSTOM = 0x5
+};
+
+/**
+ * struct dpdmux_cfg - DPDMUX configuration parameters
+ * @method: Defines the operation method for the DPDMUX address table
+ * @manip: Required manipulation operation
+ * @num_ifs: Number of interfaces (excluding the uplink interface)
+ * @adv: Advanced parameters; default is all zeros;
+ *	use this structure to change default settings
+ */
+struct dpdmux_cfg {
+	enum dpdmux_method method;
+	enum dpdmux_manip manip;
+	u16 num_ifs;
+	/**
+	 * struct adv - Advanced parameters
+	 * @options: DPDMUX options - combination of 'DPDMUX_OPT_' flags
+	 * @max_dmat_entries: Maximum entries in DPDMUX address table
+	 *	0 - indicates default: 64 entries per interface.
+	 * @max_mc_groups: Number of multicast groups in DPDMUX table
+	 *	0 - indicates default: 32 multicast groups
+	 * @max_vlan_ids: Maximum vlan ids allowed in the system -
+	 *	relevant only when working in the mac+vlan method.
+	 *	0 - indicates default: 16 vlan ids.
+	 */
+	struct {
+		u64 options;
+		u16 max_dmat_entries;
+		u16 max_mc_groups;
+		u16 max_vlan_ids;
+	} adv;
+};
+
+int dpdmux_create(struct fsl_mc_io *mc_io,
+		  u16 dprc_token,
+		  u32 cmd_flags,
+		  const struct dpdmux_cfg *cfg,
+		  u32 *obj_id);
+
+int dpdmux_destroy(struct fsl_mc_io *mc_io,
+		   u16 dprc_token,
+		   u32 cmd_flags,
+		   u32 object_id);
+
+int dpdmux_enable(struct fsl_mc_io *mc_io,
+		  u32 cmd_flags,
+		  u16 token);
+
+int dpdmux_disable(struct fsl_mc_io *mc_io,
+		   u32 cmd_flags,
+		   u16 token);
+
+int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      int *en);
+
+int dpdmux_reset(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token);
+
+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u8 irq_index,
+			  u8 en);
+
+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u8 irq_index,
+			  u8 *en);
+
+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u32 mask);
+
+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u32 *mask);
+
+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u8 irq_index,
+			  u32 *status);
+
+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
+			    u32 cmd_flags,
+			    u16 token,
+			    u8 irq_index,
+			    u32 status);
+
+/**
+ * struct dpdmux_attr - Structure representing DPDMUX attributes
+ * @id: DPDMUX object ID
+ * @options: Configuration options (bitmap)
+ * @method: DPDMUX address table method
+ * @manip: DPDMUX manipulation type
+ * @num_ifs: Number of interfaces (excluding the uplink interface)
+ * @mem_size: DPDMUX frame storage memory size
+ */
+struct dpdmux_attr {
+	int id;
+	u64 options;
+	enum dpdmux_method method;
+	enum dpdmux_manip manip;
+	u16 num_ifs;
+	u16 mem_size;
+};
+
+int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  struct dpdmux_attr *attr);
+
+int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
+				u32 cmd_flags,
+				u16 token,
+				u16 max_frame_length);
+
+/**
+ * enum dpdmux_counter_type - Counter types
+ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
+ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
+ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
+ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
+ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
+ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
+ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
+ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
+ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
+ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
+ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
+ */
+enum dpdmux_counter_type {
+	DPDMUX_CNT_ING_FRAME = 0x0,
DPDMUX_CNT_ING_BYTE = 0x1, + DPDMUX_CNT_ING_FLTR_FRAME = 0x2, + DPDMUX_CNT_ING_FRAME_DISCARD = 0x3, + DPDMUX_CNT_ING_MCAST_FRAME = 0x4, + DPDMUX_CNT_ING_MCAST_BYTE = 0x5, + DPDMUX_CNT_ING_BCAST_FRAME = 0x6, + DPDMUX_CNT_ING_BCAST_BYTES = 0x7, + DPDMUX_CNT_EGR_FRAME = 0x8, + DPDMUX_CNT_EGR_BYTE = 0x9, + DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa +}; + +/** + * enum dpdmux_accepted_frames_type - DPDMUX frame types + * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and + * priority-tagged frames + * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or + * priority-tagged frames that are received on this + * interface + * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames + * received on this interface are accepted + */ +enum dpdmux_accepted_frames_type { + DPDMUX_ADMIT_ALL = 0, + DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1, + DPDMUX_ADMIT_ONLY_UNTAGGED = 2 +}; + +/** + * enum dpdmux_action - DPDMUX action for un-accepted frames + * @DPDMUX_ACTION_DROP: Drop un-accepted frames + * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the + * control interface + */ +enum dpdmux_action { + DPDMUX_ACTION_DROP = 0, + DPDMUX_ACTION_REDIRECT_TO_CTRL = 1 +}; + +/** + * struct dpdmux_accepted_frames - Frame types configuration + * @type: Defines ingress accepted frames + * @unaccept_act: Defines action on frames not accepted + */ +struct dpdmux_accepted_frames { + enum dpdmux_accepted_frames_type type; + enum dpdmux_action unaccept_act; +}; + +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpdmux_accepted_frames *cfg); + +/** + * struct dpdmux_if_attr - Structure representing frame types configuration + * @rate: Configured interface rate (in bits per second) + * @enabled: Indicates if interface is enabled + * @accept_frame_type: Indicates type of accepted frames for the interface + */ +struct dpdmux_if_attr { + u32 rate; + int enabled; + enum dpdmux_accepted_frames_type accept_frame_type; +}; + +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpdmux_if_attr *attr); + +int dpdmux_if_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id); + +int dpdmux_if_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id); + +/** + * struct dpdmux_l2_rule - Structure representing L2 rule + * @mac_addr: MAC address + * @vlan_id: VLAN ID + */ +struct dpdmux_l2_rule { + u8 mac_addr[6]; + u16 vlan_id; +}; + +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpdmux_l2_rule *rule); + +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + const struct dpdmux_l2_rule *rule); + +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + enum dpdmux_counter_type counter_type, + u64 *counter); + +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * Enable auto-negotiation + */ +#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL +/** + * Enable half-duplex mode + */ +#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +/** + * Enable pause frames + */ +#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL +/** + * Enable a-symmetric pause frames + */ +#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL + +/** + * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration + * @rate: Rate + * @options: Mask of 
available options; use 'DPDMUX_LINK_OPT_' values + */ +struct dpdmux_link_cfg { + u32 rate; + u64 options; +}; + +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpdmux_link_cfg *cfg); +/** + * struct dpdmux_link_state - Structure representing DPDMUX link state + * @rate: Rate + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_' values + * @up: 0 - down, 1 - up + */ +struct dpdmux_link_state { + u32 rate; + u64 options; + int up; +}; + +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 if_id, + struct dpdmux_link_state *state); + +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u64 key_cfg_iova); + +/** + * struct dpdmux_rule_cfg - Custom classification rule. + * + * @key_iova: DMA address of buffer storing the look-up value + * @mask_iova: DMA address of the mask used for TCAM classification + * @key_size: size, in bytes, of the look-up value. This must match the size + * of the look-up key defined using dpdmux_set_custom_key, otherwise the + * entry will never be hit + */ +struct dpdmux_rule_cfg { + u64 key_iova; + u64 mask_iova; + u8 key_size; +}; + +/** + * struct dpdmux_cls_action - Action to execute for frames matching the + * classification entry + * + * @dest_if: Interface to forward the frames to. Port numbering is similar to + * the one used to connect interfaces: + * - 0 is the uplink port, + * - all others are downlink ports. + */ +struct dpdmux_cls_action { + u16 dest_if; +}; + +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpdmux_rule_cfg *rule, + struct dpdmux_cls_action *action); + +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpdmux_rule_cfg *rule); + +int dpdmux_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver); + +#endif /* __FSL_DPDMUX_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/evb/evb.c @@ -0,0 +1,1350 @@ +/* Copyright 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+
+#include <uapi/linux/if_bridge.h>
+#include <net/netlink.h>
+
+#include "../../fsl-mc/include/mc.h"
+
+#include "dpdmux.h"
+#include "dpdmux-cmd.h"
+
+static const char evb_drv_version[] = "0.1";
+
+/* Minimal supported DPDMUX version */
+#define DPDMUX_MIN_VER_MAJOR	6
+#define DPDMUX_MIN_VER_MINOR	0
+
+/* IRQ index */
+#define DPDMUX_MAX_IRQ_NUM	2
+
+/* MAX FRAME LENGTH (currently 10k) */
+#define EVB_MAX_FRAME_LENGTH	(10 * 1024)
+/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
+#define EVB_MIN_FRAME_LENGTH	68
+
+struct evb_port_priv {
+	struct net_device	*netdev;
+	struct list_head	list;
+	u16			port_index;
+	struct evb_priv		*evb_priv;
+	u8			vlans[VLAN_VID_MASK + 1];
+};
+
+struct evb_priv {
+	/* keep first */
+	struct evb_port_priv	uplink;
+
+	struct fsl_mc_io	*mc_io;
+	struct list_head	port_list;
+	struct dpdmux_attr	attr;
+	u16			mux_handle;
+	int			dev_id;
+};
+
+static int _evb_port_carrier_state_sync(struct net_device *netdev)
+{
+	struct evb_port_priv *port_priv = netdev_priv(netdev);
+	struct dpdmux_link_state state;
+	int err;
+
+	err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
+				       port_priv->evb_priv->mux_handle,
+				       port_priv->port_index, &state);
+	if (unlikely(err)) {
+		netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
+		return err;
+	}
+
+	WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+	if (state.up)
+		netif_carrier_on(port_priv->netdev);
+	else
+		netif_carrier_off(port_priv->netdev);
+
+	return 0;
+}
+
+static int evb_port_open(struct net_device *netdev)
+{
+	int err;
+
+	/* FIXME: enable port when support added */
+
+	err = _evb_port_carrier_state_sync(netdev);
+	if (err) {
+		netdev_err(netdev, "_evb_port_carrier_state_sync err %d\n",
+			   err);
+		return err;
+	}
+
+	return 0;
+}
+
+static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
+{
+	/* we don't support I/O for now, drop the frame */
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static int evb_links_state_update(struct evb_priv *priv)
+{
+	struct evb_port_priv *port_priv;
+	struct list_head *pos;
+	int err;
+
+	list_for_each(pos, &priv->port_list) {
+		port_priv = list_entry(pos, struct evb_port_priv, list);
+
+		err = _evb_port_carrier_state_sync(port_priv->netdev);
+		if (err)
+			netdev_err(port_priv->netdev,
+				   "_evb_port_carrier_state_sync err %d\n",
+				   err);
+	}
+
+	return 0;
+}
+
+/* The hard-irq handler only wakes the IRQ thread; all MC command I/O is
+ * deferred to thread context, where it is allowed to sleep.
+ */
+static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
+{
+	struct device *dev = (struct device *)arg;
+	struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct evb_priv *priv = netdev_priv(netdev);
+	struct fsl_mc_io *io = priv->mc_io;
+	u16 token = priv->mux_handle;
+	int irq_index = DPDMUX_IRQ_INDEX_IF;
+
+	/* Mask the events and the if_id reserved bits to be cleared on read */
+	u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
+	int err;
+
+	/* Sanity check */
+ if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index])) + goto out; + if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num)) + goto out; + + err = dpdmux_get_irq_status(io, 0, token, irq_index, &status); + if (unlikely(err)) { + netdev_err(netdev, "Can't get irq status (err %d)", err); + err = dpdmux_clear_irq_status(io, 0, token, irq_index, + 0xFFFFFFFF); + if (unlikely(err)) + netdev_err(netdev, "Can't clear irq status (err %d)", + err); + goto out; + } + + if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) { + err = evb_links_state_update(priv); + if (unlikely(err)) + goto out; + } + +out: + return IRQ_HANDLED; +} + +static int evb_setup_irqs(struct fsl_mc_device *evb_dev) +{ + struct device *dev = &evb_dev->dev; + struct net_device *netdev = dev_get_drvdata(dev); + struct evb_priv *priv = netdev_priv(netdev); + int err = 0; + struct fsl_mc_device_irq *irq; + const int irq_index = DPDMUX_IRQ_INDEX_IF; + u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED; + + err = fsl_mc_allocate_irqs(evb_dev); + if (unlikely(err)) { + dev_err(dev, "MC irqs allocation failed\n"); + return err; + } + + if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) { + err = -EINVAL; + goto free_irq; + } + + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle, + irq_index, 0); + if (unlikely(err)) { + dev_err(dev, "dpdmux_set_irq_enable err %d\n", err); + goto free_irq; + } + + irq = evb_dev->irqs[irq_index]; + + err = devm_request_threaded_irq(dev, irq->msi_desc->irq, + evb_irq0_handler, + _evb_irq0_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(dev), dev); + if (unlikely(err)) { + dev_err(dev, "devm_request_threaded_irq(): %d", err); + goto free_irq; + } + + err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle, + irq_index, mask); + if (unlikely(err)) { + dev_err(dev, "dpdmux_set_irq_mask(): %d", err); + goto free_devm_irq; + } + + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle, + irq_index, 1); + if (unlikely(err)) { + dev_err(dev, "dpdmux_set_irq_enable(): %d", err); + goto free_devm_irq; + } + + return 0; + +free_devm_irq: + devm_free_irq(dev, irq->msi_desc->irq, dev); +free_irq: + fsl_mc_free_irqs(evb_dev); + return err; +} + +static void evb_teardown_irqs(struct fsl_mc_device *evb_dev) +{ + struct device *dev = &evb_dev->dev; + struct net_device *netdev = dev_get_drvdata(dev); + struct evb_priv *priv = netdev_priv(netdev); + + dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle, + DPDMUX_IRQ_INDEX_IF, 0); + + devm_free_irq(dev, + evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq, + dev); + fsl_mc_free_irqs(evb_dev); +} + +static int evb_port_add_rule(struct net_device *netdev, + const unsigned char *addr, u16 vid) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + struct dpdmux_l2_rule rule = { .vlan_id = vid }; + int err; + + if (addr) + ether_addr_copy(rule.mac_addr, addr); + + err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io, + 0, + port_priv->evb_priv->mux_handle, + port_priv->port_index, &rule); + if (unlikely(err)) + netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err); + return err; +} + +static int evb_port_del_rule(struct net_device *netdev, + const unsigned char *addr, u16 vid) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + struct dpdmux_l2_rule rule = { .vlan_id = vid }; + int err; + + if (addr) + ether_addr_copy(rule.mac_addr, addr); + + err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io, + 0, + port_priv->evb_priv->mux_handle, + port_priv->port_index, &rule); + if (unlikely(err)) + 
netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err); + return err; +} + +static bool _lookup_address(struct net_device *netdev, + const unsigned char *addr) +{ + struct netdev_hw_addr *ha; + struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ? + &netdev->uc : &netdev->mc; + + netif_addr_lock_bh(netdev); + list_for_each_entry(ha, &list->list, list) { + if (ether_addr_equal(ha->addr, addr)) { + netif_addr_unlock_bh(netdev); + return true; + } + } + netif_addr_unlock_bh(netdev); + return false; +} + +static inline int evb_port_fdb_prep(struct nlattr *tb[], + struct net_device *netdev, + const unsigned char *addr, u16 *vid, + bool del) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + struct evb_priv *evb_priv = port_priv->evb_priv; + + *vid = 0; + + if (evb_priv->attr.method != DPDMUX_METHOD_MAC && + evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) { + netdev_err(netdev, + "EVB mode does not support MAC classification\n"); + return -EOPNOTSUPP; + } + + /* check if the address is configured on this port */ + if (_lookup_address(netdev, addr)) { + if (!del) + return -EEXIST; + } else { + if (del) + return -ENOENT; + } + + if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) { + if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) { + netdev_err(netdev, "invalid vlan size %d\n", + nla_len(tb[NDA_VLAN])); + return -EINVAL; + } + + *vid = nla_get_u16(tb[NDA_VLAN]); + + if (!*vid || *vid >= VLAN_VID_MASK) { + netdev_err(netdev, "invalid vid value 0x%04x\n", *vid); + return -EINVAL; + } + } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) { + netdev_err(netdev, + "EVB mode requires explicit VLAN configuration\n"); + return -EINVAL; + } else if (tb[NDA_VLAN]) { + netdev_warn(netdev, "VLAN not supported, argument ignored\n"); + } + + return 0; +} + +static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *netdev, + const unsigned char *addr, u16 vid, u16 flags) +{ + u16 _vid; + int err; + + /* TODO: add replace support when added to iproute bridge */ + if (!(flags & NLM_F_REQUEST)) { + netdev_err(netdev, + "evb_port_fdb_add unexpected flags value %08x\n", + flags); + return -EINVAL; + } + + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0); + if (unlikely(err)) + return err; + + err = evb_port_add_rule(netdev, addr, _vid); + if (unlikely(err)) + return err; + + if (is_unicast_ether_addr(addr)) { + err = dev_uc_add(netdev, addr); + if (unlikely(err)) { + netdev_err(netdev, "dev_uc_add err %d\n", err); + return err; + } + } else { + err = dev_mc_add(netdev, addr); + if (unlikely(err)) { + netdev_err(netdev, "dev_mc_add err %d\n", err); + return err; + } + } + + return 0; +} + +static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *netdev, + const unsigned char *addr, u16 vid) +{ + u16 _vid; + int err; + + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1); + if (unlikely(err)) + return err; + + err = evb_port_del_rule(netdev, addr, _vid); + if (unlikely(err)) + return err; + + if (is_unicast_ether_addr(addr)) { + err = dev_uc_del(netdev, addr); + if (unlikely(err)) { + netdev_err(netdev, "dev_uc_del err %d\n", err); + return err; + } + } else { + err = dev_mc_del(netdev, addr); + if (unlikely(err)) { + netdev_err(netdev, "dev_mc_del err %d\n", err); + return err; + } + } + + return 0; +} + +static int evb_change_mtu(struct net_device *netdev, + int mtu) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + struct evb_priv *evb_priv = port_priv->evb_priv; + struct 
list_head *pos;
+	int err = 0;
+
+	/* This operation is not permitted on downlinks */
+	if (port_priv->port_index > 0)
+		return -EPERM;
+
+	if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
+		netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
+			   mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
+		return -EINVAL;
+	}
+
+	err = dpdmux_set_max_frame_length(evb_priv->mc_io,
+					  0,
+					  evb_priv->mux_handle,
+					  (uint16_t)mtu);
+
+	if (unlikely(err)) {
+		netdev_err(netdev, "dpdmux_set_max_frame_length err %d\n",
+			   err);
+		return err;
+	}
+
+	/* Update the max frame length for downlinks */
+	list_for_each(pos, &evb_priv->port_list) {
+		port_priv = list_entry(pos, struct evb_port_priv, list);
+		port_priv->netdev->mtu = mtu;
+	}
+
+	netdev->mtu = mtu;
+	return 0;
+}
+
+static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
+	[IFLA_BRIDGE_FLAGS]	= { .type = NLA_U16 },
+	[IFLA_BRIDGE_MODE]	= { .type = NLA_U16 },
+	[IFLA_BRIDGE_VLAN_INFO]	= { .type = NLA_BINARY,
+				    .len = sizeof(struct bridge_vlan_info), },
+};
+
+static int evb_setlink_af_spec(struct net_device *netdev,
+			       struct nlattr **tb)
+{
+	struct bridge_vlan_info *vinfo;
+	struct evb_port_priv *port_priv = netdev_priv(netdev);
+	int err = 0;
+
+	if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
+		netdev_err(netdev, "no VLAN INFO in nlmsg\n");
+		return -EOPNOTSUPP;
+	}
+
+	vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
+
+	if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
+		return -EINVAL;
+
+	err = evb_port_add_rule(netdev, NULL, vinfo->vid);
+	if (unlikely(err))
+		return err;
+
+	port_priv->vlans[vinfo->vid] = 1;
+
+	return 0;
+}
+
+static int evb_setlink(struct net_device *netdev,
+		       struct nlmsghdr *nlh,
+		       u16 flags)
+{
+	struct evb_port_priv *port_priv = netdev_priv(netdev);
+	struct evb_priv *evb_priv = port_priv->evb_priv;
+	struct nlattr *attr;
+	struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
+			   IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
+	int err = 0;
+
+	if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
+	    evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
+		netdev_err(netdev,
+			   "EVB mode does not support VLAN only classification\n");
+		return -EOPNOTSUPP;
+	}
+
+	attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (attr) {
+		err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
+				       ifla_br_policy);
+		if (unlikely(err)) {
+			netdev_err(netdev,
+				   "nla_parse_nested for br_policy err %d\n",
+				   err);
+			return err;
+		}
+
+		err = evb_setlink_af_spec(netdev, tb);
+		return err;
+	}
+
+	netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
+	return -EOPNOTSUPP;
+}
+
+static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct evb_port_priv *port_priv = netdev_priv(netdev);
+	struct evb_priv *evb_priv = port_priv->evb_priv;
+	u8 operstate = netif_running(netdev) ?
+ netdev->operstate : IF_OPER_DOWN; + int iflink; + int err; + + err = nla_put_string(skb, IFLA_IFNAME, netdev->name); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate); + if (unlikely(err)) + goto nla_put_err; + if (netdev->addr_len) { + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len, + netdev->dev_addr); + if (unlikely(err)) + goto nla_put_err; + } + + iflink = dev_get_iflink(netdev); + if (netdev->ifindex != iflink) { + err = nla_put_u32(skb, IFLA_LINK, iflink); + if (unlikely(err)) + goto nla_put_err; + } + + return 0; + +nla_put_err: + netdev_err(netdev, "nla_put_ err %d\n", err); + return err; +} + +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev) +{ + struct nlattr *nest; + int err; + + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); + if (!nest) { + netdev_err(netdev, "nla_nest_start failed\n"); + return -ENOMEM; + } + + err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0); + if (unlikely(err)) + goto nla_put_err; + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1); + if (unlikely(err)) + goto nla_put_err; + nla_nest_end(skb, nest); + + return 0; + +nla_put_err: + netdev_err(netdev, "nla_put_ err %d\n", err); + nla_nest_cancel(skb, nest); + return err; +} + +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + struct nlattr *nest; + struct bridge_vlan_info vinfo; + const u8 *vlans = port_priv->vlans; + u16 i; + int err; + + nest = nla_nest_start(skb, IFLA_AF_SPEC); + if (!nest) { + netdev_err(netdev, "nla_nest_start failed"); + return -ENOMEM; + } + + for (i = 0; i < VLAN_VID_MASK + 1; i++) { + if (!vlans[i]) + continue; + + vinfo.flags = 0; + vinfo.vid = i; + + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO, + sizeof(vinfo), &vinfo); + if (unlikely(err)) + goto nla_put_err; + } + + nla_nest_end(skb, nest); + + return 0; + +nla_put_err: + netdev_err(netdev, "nla_put_ err %d\n", err); + nla_nest_cancel(skb, nest); + return err; +} + +static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *netdev, u32 filter_mask, int nlflags) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + struct evb_priv *evb_priv = port_priv->evb_priv; + struct ifinfomsg *hdr; + struct nlmsghdr *nlh; + int err; + + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN && + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) { + return 0; + } + + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + hdr = nlmsg_data(nlh); + memset(hdr, 0, sizeof(*hdr)); + hdr->ifi_family = AF_BRIDGE; + hdr->ifi_type = netdev->type; + hdr->ifi_index = 
netdev->ifindex;
+	hdr->ifi_flags = dev_get_flags(netdev);
+
+	err = __nla_put_netdev(skb, netdev);
+	if (unlikely(err))
+		goto nla_put_err;
+
+	err = __nla_put_port(skb, netdev);
+	if (unlikely(err))
+		goto nla_put_err;
+
+	/* Check if the VID information is requested */
+	if (filter_mask & RTEXT_FILTER_BRVLAN) {
+		err = __nla_put_vlan(skb, netdev);
+		if (unlikely(err))
+			goto nla_put_err;
+	}
+
+	nlmsg_end(skb, nlh);
+	return skb->len;
+
+nla_put_err:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static int evb_dellink(struct net_device *netdev,
+		       struct nlmsghdr *nlh,
+		       u16 flags)
+{
+	struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
+	struct nlattr *spec;
+	struct bridge_vlan_info *vinfo;
+	struct evb_port_priv *port_priv = netdev_priv(netdev);
+	int err = 0;
+
+	spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!spec)
+		return 0;
+
+	err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
+	if (unlikely(err))
+		return err;
+
+	if (!tb[IFLA_BRIDGE_VLAN_INFO])
+		return -EOPNOTSUPP;
+
+	vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
+
+	if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
+		return -EINVAL;
+
+	err = evb_port_del_rule(netdev, NULL, vinfo->vid);
+	if (unlikely(err)) {
+		netdev_err(netdev, "evb_port_del_rule err %d\n", err);
+		return err;
+	}
+	port_priv->vlans[vinfo->vid] = 0;
+
+	return 0;
+}
+
+static struct rtnl_link_stats64 *
+evb_port_get_stats(struct net_device *netdev,
+		   struct rtnl_link_stats64 *storage)
+{
+	struct evb_port_priv *port_priv = netdev_priv(netdev);
+	u64 tmp;
+	int err;
+
+	err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+				    0,
+				    port_priv->evb_priv->mux_handle,
+				    port_priv->port_index,
+				    DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
+	if (unlikely(err))
+		goto error;
+
+	err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+				    0,
+				    port_priv->evb_priv->mux_handle,
+				    port_priv->port_index,
+				    DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
+	if (unlikely(err))
+		goto error;
+
+	err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+				    0,
+				    port_priv->evb_priv->mux_handle,
+				    port_priv->port_index,
+				    DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
+	if (unlikely(err))
+		goto error;
+
+	err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+				    0,
+				    port_priv->evb_priv->mux_handle,
+				    port_priv->port_index,
+				    DPDMUX_CNT_ING_FRAME_DISCARD,
+				    &storage->rx_dropped);
+	if (unlikely(err)) {
+		storage->rx_dropped = tmp;
+		goto error;
+	}
+	storage->rx_dropped += tmp;
+
+	err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+				    0,
+				    port_priv->evb_priv->mux_handle,
+				    port_priv->port_index,
+				    DPDMUX_CNT_ING_MCAST_FRAME,
+				    &storage->multicast);
+	if (unlikely(err))
+		goto error;
+
+	err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+				    0,
+				    port_priv->evb_priv->mux_handle,
+				    port_priv->port_index,
+				    DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
+	if (unlikely(err))
+		goto error;
+
+	err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+				    0,
+				    port_priv->evb_priv->mux_handle,
+				    port_priv->port_index,
+				    DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
+	if (unlikely(err))
+		goto error;
+
+	err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+				    0,
+				    port_priv->evb_priv->mux_handle,
+				    port_priv->port_index,
+				    DPDMUX_CNT_EGR_FRAME_DISCARD,
+				    &storage->tx_dropped);
+	if (unlikely(err))
+		goto error;
+
+	return storage;
+
+error:
+	netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
+	return storage;
+}
+
+static const struct net_device_ops evb_port_ops = {
+	.ndo_open		= &evb_port_open,
+
+	.ndo_start_xmit		= &evb_dropframe,
+
+	.ndo_fdb_add		= &evb_port_fdb_add,
+	.ndo_fdb_del		=
&evb_port_fdb_del, + + .ndo_get_stats64 = &evb_port_get_stats, + .ndo_change_mtu = &evb_change_mtu, +}; + +static void evb_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + u16 version_major, version_minor; + int err; + + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version)); + + err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0, + &version_major, + &version_minor); + if (err) + strlcpy(drvinfo->fw_version, "N/A", + sizeof(drvinfo->fw_version)); + else + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u", version_major, version_minor); + + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent), + sizeof(drvinfo->bus_info)); +} + +static int evb_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + struct dpdmux_link_state state = {0}; + int err = 0; + + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0, + port_priv->evb_priv->mux_handle, + port_priv->port_index, + &state); + if (err) { + netdev_err(netdev, "ERROR %d getting link state", err); + goto out; + } + + /* At the moment, we have no way of interrogating the DPMAC + * from the DPDMUX side or there may not exist a DPMAC at all. + * Report only autoneg state, duplexity and speed. + */ + if (state.options & DPDMUX_LINK_OPT_AUTONEG) + cmd->autoneg = AUTONEG_ENABLE; + if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX)) + cmd->duplex = DUPLEX_FULL; + ethtool_cmd_speed_set(cmd, state.rate); + +out: + return err; +} + +static int evb_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + struct dpdmux_link_state state = {0}; + struct dpdmux_link_cfg cfg = {0}; + int err = 0; + + netdev_dbg(netdev, "Setting link parameters..."); + + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0, + port_priv->evb_priv->mux_handle, + port_priv->port_index, + &state); + if (err) { + netdev_err(netdev, "ERROR %d getting link state", err); + goto out; + } + + /* Due to a temporary MC limitation, the DPDMUX port must be down + * in order to be able to change link settings. Taking steps to let + * the user know that. 
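+	 * Until that limitation is lifted, "ethtool -s" on a running
+	 * interface will fail with -EACCES, as enforced just below.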
+ */ + if (netif_running(netdev)) { + netdev_info(netdev, + "Sorry, interface must be brought down first.\n"); + return -EACCES; + } + + cfg.options = state.options; + cfg.rate = ethtool_cmd_speed(cmd); + if (cmd->autoneg == AUTONEG_ENABLE) + cfg.options |= DPDMUX_LINK_OPT_AUTONEG; + else + cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG; + if (cmd->duplex == DUPLEX_HALF) + cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX; + else + cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX; + + err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0, + port_priv->evb_priv->mux_handle, + port_priv->port_index, + &cfg); + if (err) + /* ethtool will be loud enough if we return an error; no point + * in putting our own error message on the console by default + */ + netdev_dbg(netdev, "ERROR %d setting link cfg", err); + +out: + return err; +} + +static struct { + enum dpdmux_counter_type id; + char name[ETH_GSTRING_LEN]; +} evb_ethtool_counters[] = { + {DPDMUX_CNT_ING_FRAME, "rx frames"}, + {DPDMUX_CNT_ING_BYTE, "rx bytes"}, + {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"}, + {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"}, + {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"}, + {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"}, + {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"}, + {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"}, + {DPDMUX_CNT_EGR_FRAME, "tx frames"}, + {DPDMUX_CNT_EGR_BYTE, "tx bytes"}, + {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"}, +}; + +static int evb_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(evb_ethtool_counters); + default: + return -EOPNOTSUPP; + } +} + +static void evb_ethtool_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + u32 i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) + memcpy(data + i * ETH_GSTRING_LEN, + evb_ethtool_counters[i].name, ETH_GSTRING_LEN); + break; + } +} + +static void evb_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); + u32 i; + int err; + + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) { + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io, + 0, + port_priv->evb_priv->mux_handle, + port_priv->port_index, + evb_ethtool_counters[i].id, + &data[i]); + if (err) + netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n", + evb_ethtool_counters[i].name, err); + } +} + +static const struct ethtool_ops evb_port_ethtool_ops = { + .get_drvinfo = &evb_get_drvinfo, + .get_link = ðtool_op_get_link, + .get_settings = &evb_get_settings, + .set_settings = &evb_set_settings, + .get_strings = &evb_ethtool_get_strings, + .get_ethtool_stats = &evb_ethtool_get_stats, + .get_sset_count = &evb_ethtool_get_sset_count, +}; + +static int evb_open(struct net_device *netdev) +{ + struct evb_priv *priv = netdev_priv(netdev); + int err = 0; + + err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle); + if (unlikely(err)) + netdev_err(netdev, "dpdmux_enable err %d\n", err); + + return err; +} + +static int evb_close(struct net_device *netdev) +{ + struct evb_priv *priv = netdev_priv(netdev); + int err = 0; + + err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle); + if (unlikely(err)) + netdev_err(netdev, "dpdmux_disable err %d\n", err); + + return err; +} + +static const struct net_device_ops evb_ops = { + .ndo_start_xmit = &evb_dropframe, + .ndo_open = &evb_open, + .ndo_stop = &evb_close, + + .ndo_bridge_setlink = 
&evb_setlink,
+	.ndo_bridge_getlink	= &evb_getlink,
+	.ndo_bridge_dellink	= &evb_dellink,
+
+	.ndo_get_stats64	= &evb_port_get_stats,
+	.ndo_change_mtu		= &evb_change_mtu,
+};
+
+static int evb_takedown(struct fsl_mc_device *evb_dev)
+{
+	struct device *dev = &evb_dev->dev;
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct evb_priv *priv = netdev_priv(netdev);
+	int err;
+
+	err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
+	if (unlikely(err))
+		dev_warn(dev, "dpdmux_close err %d\n", err);
+
+	return 0;
+}
+
+static int evb_init(struct fsl_mc_device *evb_dev)
+{
+	struct device *dev = &evb_dev->dev;
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct evb_priv *priv = netdev_priv(netdev);
+	u16 version_major;
+	u16 version_minor;
+	int err = 0;
+
+	priv->dev_id = evb_dev->obj_desc.id;
+
+	err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
+	if (unlikely(err)) {
+		dev_err(dev, "dpdmux_open err %d\n", err);
+		goto err_exit;
+	}
+	if (!priv->mux_handle) {
+		dev_err(dev, "dpdmux_open returned null handle but no error\n");
+		err = -EFAULT;
+		goto err_exit;
+	}
+
+	err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
+				    &priv->attr);
+	if (unlikely(err)) {
+		dev_err(dev, "dpdmux_get_attributes err %d\n", err);
+		goto err_close;
+	}
+
+	err = dpdmux_get_api_version(priv->mc_io, 0,
+				     &version_major,
+				     &version_minor);
+	if (unlikely(err)) {
+		dev_err(dev, "dpdmux_get_api_version err %d\n", err);
+		goto err_close;
+	}
+
+	/* Minimum supported DPDMUX version check */
+	if (version_major < DPDMUX_MIN_VER_MAJOR ||
+	    (version_major == DPDMUX_MIN_VER_MAJOR &&
+	     version_minor < DPDMUX_MIN_VER_MINOR)) {
+		dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
+			version_major, version_minor,
+			DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
+		err = -ENOTSUPP;
+		goto err_close;
+	}
+
+	err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
+	if (unlikely(err)) {
+		dev_err(dev, "dpdmux_reset err %d\n", err);
+		goto err_close;
+	}
+
+	return 0;
+
+err_close:
+	dpdmux_close(priv->mc_io, 0, priv->mux_handle);
+err_exit:
+	return err;
+}
+
+static int evb_remove(struct fsl_mc_device *evb_dev)
+{
+	struct device *dev = &evb_dev->dev;
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct evb_priv *priv = netdev_priv(netdev);
+	struct evb_port_priv *port_priv;
+	struct list_head *pos, *tmp;
+
+	/* free_netdev() also frees the embedded list node, so iteration
+	 * must use the _safe variant
+	 */
+	list_for_each_safe(pos, tmp, &priv->port_list) {
+		port_priv = list_entry(pos, struct evb_port_priv, list);
+
+		rtnl_lock();
+		netdev_upper_dev_unlink(port_priv->netdev, netdev);
+		rtnl_unlock();
+
+		unregister_netdev(port_priv->netdev);
+		free_netdev(port_priv->netdev);
+	}
+
+	evb_teardown_irqs(evb_dev);
+
+	unregister_netdev(netdev);
+
+	evb_takedown(evb_dev);
+	fsl_mc_portal_free(priv->mc_io);
+
+	dev_set_drvdata(dev, NULL);
+	free_netdev(netdev);
+
+	return 0;
+}
+
+static int evb_probe(struct fsl_mc_device *evb_dev)
+{
+	struct device *dev;
+	struct evb_priv *priv = NULL;
+	struct net_device *netdev = NULL;
+	char port_name[IFNAMSIZ];
+	int i;
+	int err = 0;
+
+	dev = &evb_dev->dev;
+
+	/* register switch device, it's for management only - no I/O */
+	netdev = alloc_etherdev(sizeof(*priv));
+	if (!netdev) {
+		dev_err(dev, "alloc_etherdev error\n");
+		return -ENOMEM;
+	}
+	netdev->netdev_ops = &evb_ops;
+
+	dev_set_drvdata(dev, netdev);
+
+	priv = netdev_priv(netdev);
+
+	err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
+	if (unlikely(err)) {
+		dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+		goto err_free_netdev;
+	}
+	if (!priv->mc_io) {
dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
+		err = -EFAULT;
+		goto err_free_netdev;
+	}
+
+	err = evb_init(evb_dev);
+	if (unlikely(err)) {
+		dev_err(dev, "evb init err %d\n", err);
+		goto err_free_cmdport;
+	}
+
+	INIT_LIST_HEAD(&priv->port_list);
+	netdev->flags |= IFF_PROMISC | IFF_MASTER;
+
+	dev_alloc_name(netdev, "evb%d");
+
+	/* register switch ports */
+	snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
+
+	/* only register downlinks? */
+	for (i = 0; i < priv->attr.num_ifs + 1; i++) {
+		struct net_device *port_netdev;
+		struct evb_port_priv *port_priv;
+
+		if (i) {
+			port_netdev =
+				alloc_etherdev(sizeof(struct evb_port_priv));
+			if (!port_netdev) {
+				dev_err(dev, "alloc_etherdev error\n");
+				err = -ENOMEM;
+				goto err_takedown;
+			}
+
+			port_priv = netdev_priv(port_netdev);
+
+			port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
+
+			dev_alloc_name(port_netdev, port_name);
+		} else {
+			port_netdev = netdev;
+			port_priv = &priv->uplink;
+		}
+
+		port_priv->netdev = port_netdev;
+		port_priv->evb_priv = priv;
+		port_priv->port_index = i;
+
+		SET_NETDEV_DEV(port_netdev, dev);
+
+		if (i) {
+			port_netdev->netdev_ops = &evb_port_ops;
+
+			err = register_netdev(port_netdev);
+			if (err < 0) {
+				dev_err(dev, "register_netdev err %d\n", err);
+				free_netdev(port_netdev);
+				goto err_takedown;
+			}
+
+			rtnl_lock();
+			err = netdev_master_upper_dev_link(port_netdev, netdev,
+							   NULL, NULL);
+			if (unlikely(err)) {
+				dev_err(dev, "netdev_master_upper_dev_link err %d\n",
+					err);
+				unregister_netdev(port_netdev);
+				free_netdev(port_netdev);
+				rtnl_unlock();
+				goto err_takedown;
+			}
+			rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
+				     IFF_SLAVE, GFP_KERNEL);
+			rtnl_unlock();
+
+			list_add(&port_priv->list, &priv->port_list);
+		} else {
+			err = register_netdev(netdev);
+
+			if (err < 0) {
+				dev_err(dev, "register_netdev error %d\n", err);
+				goto err_takedown;
+			}
+		}
+
+		port_netdev->ethtool_ops = &evb_port_ethtool_ops;
+
+		/* ports are up from init */
+		rtnl_lock();
+		err = dev_open(port_netdev);
+		rtnl_unlock();
+		if (unlikely(err))
+			dev_warn(dev, "dev_open err %d\n", err);
+	}
+
+	/* setup irqs */
+	err = evb_setup_irqs(evb_dev);
+	if (unlikely(err)) {
+		dev_warn(dev, "evb_setup_irqs err %d\n", err);
+		goto err_takedown;
+	}
+
+	dev_info(dev, "probed evb device with %d ports\n",
+		 priv->attr.num_ifs);
+	return 0;
+
+err_takedown:
+	/* evb_remove() frees the MC portal and both netdevs, so return
+	 * directly instead of falling through and freeing them again
+	 */
+	evb_remove(evb_dev);
+	return err;
+
+err_free_cmdport:
+	fsl_mc_portal_free(priv->mc_io);
+err_free_netdev:
+	free_netdev(netdev);
+	return err;
+}
+
+static const struct fsl_mc_device_id evb_match_id_table[] = {
+	{
+		.vendor = FSL_MC_VENDOR_FREESCALE,
+		.obj_type = "dpdmux",
+	},
+	{}
+};
+
+static struct fsl_mc_driver evb_drv = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = evb_probe,
+	.remove = evb_remove,
+	.match_id_table = evb_match_id_table,
+};
+
+module_fsl_mc_driver(evb_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
@@ -0,0 +1,23 @@
+config FSL_DPAA2_MAC
+	tristate "DPAA2 MAC / PHY interface"
+	depends on FSL_MC_BUS && FSL_DPAA2
+	select MDIO_BUS_MUX_MMIOREG
+	select FSL_XGMAC_MDIO
+	select FIXED_PHY
+	---help---
+	Prototype driver for DPAA2 MAC / PHY interface object.
+	This driver works as a proxy between phylib, including PHY drivers,
+	and the MC firmware. It receives updates on link state changes from
+	phylib and forwards them to the MC, and it receives an interrupt from
+	the MC whenever a request is made to change the link state.
+
+
+config FSL_DPAA2_MAC_NETDEVS
+	bool "Expose net interfaces for PHYs"
+	default n
+	depends on FSL_DPAA2_MAC
+	---help---
+	Exposes macX net interfaces which allow direct control over MACs and
+	PHYs.
+
+	Leave disabled if unsure.
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/Makefile
@@ -0,0 +1,10 @@
+
+obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
+
+dpaa2-mac-objs := mac.o dpmac.o
+
+all:
+	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
+
+clean:
+	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
@@ -0,0 +1,172 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ +#ifndef _FSL_DPMAC_CMD_H +#define _FSL_DPMAC_CMD_H + +/* DPMAC Version */ +#define DPMAC_VER_MAJOR 4 +#define DPMAC_VER_MINOR 2 +#define DPMAC_CMD_BASE_VERSION 1 +#define DPMAC_CMD_ID_OFFSET 4 + +#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800) +#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c) +#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c) +#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c) +#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c) + +#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004) +#define DPMAC_CMDID_RESET DPMAC_CMD(0x005) + +#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012) +#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013) +#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014) +#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015) +#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016) +#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017) + +#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2) +#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3) +#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4) + +#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPMAC_MASK(field) \ + GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \ + DPMAC_##field##_SHIFT) +#define dpmac_set_field(var, field, val) \ + ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field))) +#define dpmac_get_field(var, field) \ + (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT) + +struct dpmac_cmd_open { + u32 dpmac_id; +}; + +struct dpmac_cmd_create { + u32 mac_id; +}; + +struct dpmac_cmd_destroy { + u32 dpmac_id; +}; + +struct dpmac_cmd_set_irq_enable { + u8 enable; + u8 pad[3]; + u8 irq_index; +}; + +struct dpmac_cmd_get_irq_enable { + u32 pad; + u8 irq_index; +}; + +struct dpmac_rsp_get_irq_enable { + u8 enabled; +}; + +struct dpmac_cmd_set_irq_mask { + u32 mask; + u8 irq_index; +}; + +struct dpmac_cmd_get_irq_mask { + u32 pad; + u8 irq_index; +}; + +struct dpmac_rsp_get_irq_mask { + u32 mask; +}; + +struct dpmac_cmd_get_irq_status { + u32 status; + u8 irq_index; +}; + +struct dpmac_rsp_get_irq_status { + u32 status; +}; + +struct dpmac_cmd_clear_irq_status { + u32 status; + u8 irq_index; +}; + +struct dpmac_rsp_get_attributes { + u8 eth_if; + u8 link_type; + u16 id; + u32 max_rate; +}; + +struct dpmac_rsp_get_link_cfg { + u64 options; + u32 rate; +}; + +#define DPMAC_STATE_SIZE 1 +#define DPMAC_STATE_SHIFT 0 + +struct dpmac_cmd_set_link_state { + u64 options; + u32 rate; + u32 pad; + /* only least significant bit is valid */ + u8 up; +}; + +struct dpmac_cmd_get_counter { + u8 type; +}; + +struct dpmac_rsp_get_counter { + u64 pad; + u64 counter; +}; + +struct dpmac_rsp_get_api_version { + u16 major; + u16 minor; +}; + +struct dpmac_cmd_set_port_mac_addr { + u8 pad[2]; + u8 addr[6]; +}; + +#endif /* _FSL_DPMAC_CMD_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c @@ -0,0 +1,620 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include "../../fsl-mc/include/mc-sys.h" +#include "../../fsl-mc/include/mc-cmd.h" +#include "dpmac.h" +#include "dpmac-cmd.h" + +/** + * dpmac_open() - Open a control session for the specified object. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpmac_id: DPMAC unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpmac_create function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpmac_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpmac_id, + u16 *token) +{ + struct dpmac_cmd_open *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpmac_cmd_open *)cmd.params; + cmd_params->dpmac_id = cpu_to_le32(dpmac_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return err; +} + +/** + * dpmac_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPMAC object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpmac_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpmac_create() - Create the DPMAC object. 
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @dprc_token:	Parent container token; '0' for default container
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg:	Configuration structure
+ * @obj_id:	Returned object id
+ *
+ * Create the DPMAC object, allocate required resources and
+ * perform required initialization.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_create(struct fsl_mc_io *mc_io,
+		 u16 dprc_token,
+		 u32 cmd_flags,
+		 const struct dpmac_cfg *cfg,
+		 u32 *obj_id)
+{
+	struct dpmac_cmd_create *cmd_params;
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
+					  cmd_flags,
+					  dprc_token);
+	cmd_params = (struct dpmac_cmd_create *)cmd.params;
+	cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	*obj_id = mc_cmd_read_object_id(&cmd);
+
+	return 0;
+}
+
+/**
+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @dprc_token:	Parent container token; '0' for default container
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id:	The object id; it must be a valid id within the container that
+ *		created this object.
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpmac_destroy(struct fsl_mc_io *mc_io,
+		  u16 dprc_token,
+		  u32 cmd_flags,
+		  u32 object_id)
+{
+	struct dpmac_cmd_destroy *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
+					  cmd_flags,
+					  dprc_token);
+	cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
+	cmd_params->dpmac_id = cpu_to_le32(object_id);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_set_irq_enable() - Set overall interrupt state.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPMAC object
+ * @irq_index:	The interrupt index to configure
+ * @en:	Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no causes will
+ * trigger an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
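+ *
+ * A minimal usage sketch (error handling elided; the DPMAC_IRQ_INDEX and
+ * DPMAC_IRQ_EVENT_* constants are assumed from dpmac.h):
+ *
+ *	dpmac_set_irq_mask(mc_io, 0, tok, DPMAC_IRQ_INDEX,
+ *			   DPMAC_IRQ_EVENT_LINK_CFG_REQ);
+ *	dpmac_set_irq_enable(mc_io, 0, tok, DPMAC_IRQ_INDEX, 1);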
+ */ +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en) +{ + struct dpmac_cmd_set_irq_enable *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params; + cmd_params->irq_index = irq_index; + cmd_params->enable = en; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpmac_get_irq_enable() - Get overall interrupt state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPMAC object + * @irq_index: The interrupt index to configure + * @en: Returned interrupt state - enable = 1, disable = 0 + * + * Return: '0' on Success; Error code otherwise. + */ +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en) +{ + struct dpmac_cmd_get_irq_enable *cmd_params; + struct dpmac_rsp_get_irq_enable *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params; + *en = rsp_params->enabled; + + return 0; +} + +/** + * dpmac_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPMAC object + * @irq_index: The interrupt index to configure + * @mask: Event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask) +{ + struct dpmac_cmd_set_irq_mask *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpmac_get_irq_mask() - Get interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPMAC object + * @irq_index: The interrupt index to configure + * @mask: Returned event mask to trigger interrupt + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. 
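+ *
+ * A sketch of the intended read-modify-write pattern (error handling
+ * elided; DPMAC_IRQ_EVENT_LINK_CHANGED is assumed from dpmac.h):
+ *
+ *	u32 mask;
+ *
+ *	err = dpmac_get_irq_mask(mc_io, 0, tok, irq_index, &mask);
+ *	mask |= DPMAC_IRQ_EVENT_LINK_CHANGED;
+ *	err = dpmac_set_irq_mask(mc_io, 0, tok, irq_index, mask);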
+ */ +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask) +{ + struct dpmac_cmd_get_irq_mask *cmd_params; + struct dpmac_rsp_get_irq_mask *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params; + *mask = le32_to_cpu(rsp_params->mask); + + return 0; +} + +/** + * dpmac_get_irq_status() - Get the current status of any pending interrupts. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPMAC object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. + */ +int dpmac_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status) +{ + struct dpmac_cmd_get_irq_status *cmd_params; + struct dpmac_rsp_get_irq_status *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(*status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params; + *status = le32_to_cpu(rsp_params->status); + + return 0; +} + +/** + * dpmac_clear_irq_status() - Clear a pending interrupt's status + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPMAC object + * @irq_index: The interrupt index to configure + * @status: Bits to clear (W1C) - one bit per cause: + * 0 = don't change + * 1 = clear status bit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status) +{ + struct dpmac_cmd_clear_irq_status *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpmac_get_attributes - Retrieve DPMAC attributes. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPMAC object + * @attr: Returned object's attributes + * + * Return: '0' on Success; Error code otherwise. 
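+ *
+ * A minimal usage sketch, assuming an open DPMAC @token:
+ *
+ *	struct dpmac_attr attr;
+ *
+ *	err = dpmac_get_attributes(mc_io, 0, token, &attr);
+ *	if (!err)
+ *		pr_info("dpmac.%d: max rate %u Mbps\n", attr.id, attr.max_rate);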
+ */
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 struct dpmac_attr *attr)
+{
+	struct dpmac_rsp_get_attributes *rsp_params;
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
+	attr->eth_if = rsp_params->eth_if;
+	attr->link_type = rsp_params->link_type;
+	attr->id = le16_to_cpu(rsp_params->id);
+	attr->max_rate = le32_to_cpu(rsp_params->max_rate);
+
+	return 0;
+}
+
+/**
+ * dpmac_get_link_cfg() - Get Ethernet link configuration
+ * @mc_io:	Pointer to opaque I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPMAC object
+ * @cfg:	Returned structure with the link configuration
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       struct dpmac_link_cfg *cfg)
+{
+	struct dpmac_rsp_get_link_cfg *rsp_params;
+	struct mc_command cmd = { 0 };
+	int err = 0;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
+	cfg->options = le64_to_cpu(rsp_params->options);
+	cfg->rate = le32_to_cpu(rsp_params->rate);
+
+	return 0;
+}
+
+/**
+ * dpmac_set_link_state() - Set the Ethernet link status
+ * @mc_io:	Pointer to opaque I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPMAC object
+ * @link_state:	Link state configuration
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 struct dpmac_link_state *link_state)
+{
+	struct dpmac_cmd_set_link_state *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
+	cmd_params->options = cpu_to_le64(link_state->options);
+	cmd_params->rate = cpu_to_le32(link_state->rate);
+	cmd_params->up = dpmac_get_field(link_state->up, STATE);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_counter() - Read a specific DPMAC counter
+ * @mc_io:	Pointer to opaque I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPMAC object
+ * @type:	The requested counter
+ * @counter:	Returned counter value
+ *
+ * Return:	'0' on Success; Error code otherwise.
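+ *
+ * A minimal usage sketch, assuming an open DPMAC @token:
+ *
+ *	u64 rx_bytes;
+ *
+ *	err = dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_BYTE,
+ *				&rx_bytes);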
+ */ +int dpmac_get_counter(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpmac_counter type, + u64 *counter) +{ + struct dpmac_cmd_get_counter *dpmac_cmd; + struct dpmac_rsp_get_counter *dpmac_rsp; + struct mc_command cmd = { 0 }; + int err = 0; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, + cmd_flags, + token); + dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params; + dpmac_cmd->type = type; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params; + *counter = le64_to_cpu(dpmac_rsp->counter); + + return 0; +} + +/* untested */ +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 addr[6]) +{ + struct dpmac_cmd_set_port_mac_addr *dpmac_cmd; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR, + cmd_flags, + token); + dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params; + dpmac_cmd->addr[0] = addr[5]; + dpmac_cmd->addr[1] = addr[4]; + dpmac_cmd->addr[2] = addr[3]; + dpmac_cmd->addr[3] = addr[2]; + dpmac_cmd->addr[4] = addr[1]; + dpmac_cmd->addr[5] = addr[0]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpmac_get_api_version() - Get Data Path MAC version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path mac API + * @minor_ver: Minor version of data path mac API + * + * Return: '0' on Success; Error code otherwise. + */ +int dpmac_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver) +{ + struct dpmac_rsp_get_api_version *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION, + cmd_flags, + 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} --- /dev/null +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h @@ -0,0 +1,342 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPMAC_H
+#define __FSL_DPMAC_H
+
+/* Data Path MAC API
+ * Contains initialization APIs and runtime control APIs for DPMAC
+ */
+
+struct fsl_mc_io;
+
+int dpmac_open(struct fsl_mc_io *mc_io,
+	       u32 cmd_flags,
+	       int dpmac_id,
+	       u16 *token);
+
+int dpmac_close(struct fsl_mc_io *mc_io,
+		u32 cmd_flags,
+		u16 token);
+
+/**
+ * enum dpmac_link_type - DPMAC link type
+ * @DPMAC_LINK_TYPE_NONE: No link
+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
+ */
+enum dpmac_link_type {
+	DPMAC_LINK_TYPE_NONE,
+	DPMAC_LINK_TYPE_FIXED,
+	DPMAC_LINK_TYPE_PHY,
+	DPMAC_LINK_TYPE_BACKPLANE
+};
+
+/**
+ * enum dpmac_eth_if - DPMAC Ethernet interface
+ * @DPMAC_ETH_IF_MII: MII interface
+ * @DPMAC_ETH_IF_RMII: RMII interface
+ * @DPMAC_ETH_IF_SMII: SMII interface
+ * @DPMAC_ETH_IF_GMII: GMII interface
+ * @DPMAC_ETH_IF_RGMII: RGMII interface
+ * @DPMAC_ETH_IF_SGMII: SGMII interface
+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
+ * @DPMAC_ETH_IF_XAUI: XAUI interface
+ * @DPMAC_ETH_IF_XFI: XFI interface
+ */
+enum dpmac_eth_if {
+	DPMAC_ETH_IF_MII,
+	DPMAC_ETH_IF_RMII,
+	DPMAC_ETH_IF_SMII,
+	DPMAC_ETH_IF_GMII,
+	DPMAC_ETH_IF_RGMII,
+	DPMAC_ETH_IF_SGMII,
+	DPMAC_ETH_IF_QSGMII,
+	DPMAC_ETH_IF_XAUI,
+	DPMAC_ETH_IF_XFI
+};
+
+/**
+ * struct dpmac_cfg - Structure representing DPMAC configuration
+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOPs,
+ *	    the MAC IDs are continuous.
+ *	    For example: 2 WRIOPs, 16 MACs in each:
+ *	    MAC IDs for the 1st WRIOP: 1-16,
+ *	    MAC IDs for the 2nd WRIOP: 17-32.
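+ *
+ *	    With the layout above, selecting the first MAC of the second
+ *	    WRIOP would use, as a minimal sketch:
+ *
+ *		struct dpmac_cfg cfg = { .mac_id = 17 };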
+ */
+struct dpmac_cfg {
+	u16 mac_id;
+};
+
+int dpmac_create(struct fsl_mc_io *mc_io,
+		 u16 dprc_token,
+		 u32 cmd_flags,
+		 const struct dpmac_cfg *cfg,
+		 u32 *obj_id);
+
+int dpmac_destroy(struct fsl_mc_io *mc_io,
+		  u16 dprc_token,
+		  u32 cmd_flags,
+		  u32 object_id);
+
+/**
+ * DPMAC IRQ Index and Events
+ */
+
+/**
+ * IRQ index
+ */
+#define DPMAC_IRQ_INDEX			0
+/**
+ * IRQ event - indicates a request to configure the link
+ */
+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ	0x00000001
+/**
+ * IRQ event - Indicates that the link state changed
+ */
+#define DPMAC_IRQ_EVENT_LINK_CHANGED	0x00000002
+
+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u8 en);
+
+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u8 *en);
+
+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u8 irq_index,
+		       u32 mask);
+
+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u8 irq_index,
+		       u32 *mask);
+
+int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u32 *status);
+
+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   u8 irq_index,
+			   u32 status);
+
+/**
+ * struct dpmac_attr - Structure representing DPMAC attributes
+ * @id:		DPMAC object ID
+ * @max_rate:	Maximum supported rate - in Mbps
+ * @eth_if:	Ethernet interface
+ * @link_type:	link type
+ */
+struct dpmac_attr {
+	u16 id;
+	u32 max_rate;
+	enum dpmac_eth_if eth_if;
+	enum dpmac_link_type link_type;
+};
+
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 struct dpmac_attr *attr);
+
+/**
+ * DPMAC link configuration/state options
+ */
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPMAC_LINK_OPT_AUTONEG		0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPMAC_LINK_OPT_HALF_DUPLEX	0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPMAC_LINK_OPT_PAUSE		0x0000000000000004ULL
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPMAC_LINK_OPT_ASYM_PAUSE	0x0000000000000008ULL
+
+/**
+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration
+ * @rate: Link's rate - in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ */
+struct dpmac_link_cfg {
+	u32 rate;
+	u64 options;
+};
+
+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       struct dpmac_link_cfg *cfg);
+
+/**
+ * struct dpmac_link_state - DPMAC link configuration request
+ * @rate: Rate in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @up: Link state
+ */
+struct dpmac_link_state {
+	u32 rate;
+	u64 options;
+	int up;
+};
+
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 struct dpmac_link_state *link_state);
+
+/**
+ * enum dpmac_counter - DPMAC counter types
+ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
+ *				  (up to max frame length specified),
+ *				  good or bad.
+ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received
+ *			with a wrong CRC
+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
+ *			  specified, with a bad frame check sequence.
+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
+ *				 Occurs when a receive FIFO overflows.
+ *				 Includes also frames truncated as a result of
+ *				 the receive FIFO overflow.
+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
+ *			     (optionally used for wrong SFD).
+ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that were less than 64
+ *			      bytes long with a good CRC.
+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
+ *			     specified, with a good frame check sequence.
+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and
+ *				     PFC).
+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
+ *				     (regular and PFC).
+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
+ *			frames and valid pause frames.
+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
+ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
+ *			     (except for undersized/fragment frames).
+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
+ *			frames and valid pause frames transmitted.
+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
+ *			      pause frames.
+ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error,
+ *			      including pause frames.
+ */
+enum dpmac_counter {
+	DPMAC_CNT_ING_FRAME_64,
+	DPMAC_CNT_ING_FRAME_127,
+	DPMAC_CNT_ING_FRAME_255,
+	DPMAC_CNT_ING_FRAME_511,
+	DPMAC_CNT_ING_FRAME_1023,
+	DPMAC_CNT_ING_FRAME_1518,
+	DPMAC_CNT_ING_FRAME_1519_MAX,
+	DPMAC_CNT_ING_FRAG,
+	DPMAC_CNT_ING_JABBER,
+	DPMAC_CNT_ING_FRAME_DISCARD,
+	DPMAC_CNT_ING_ALIGN_ERR,
+	DPMAC_CNT_EGR_UNDERSIZED,
+	DPMAC_CNT_ING_OVERSIZED,
+	DPMAC_CNT_ING_VALID_PAUSE_FRAME,
+	DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
+	DPMAC_CNT_ING_BYTE,
+	DPMAC_CNT_ING_MCAST_FRAME,
+	DPMAC_CNT_ING_BCAST_FRAME,
+	DPMAC_CNT_ING_ALL_FRAME,
+	DPMAC_CNT_ING_UCAST_FRAME,
+	DPMAC_CNT_ING_ERR_FRAME,
+	DPMAC_CNT_EGR_BYTE,
+	DPMAC_CNT_EGR_MCAST_FRAME,
+	DPMAC_CNT_EGR_BCAST_FRAME,
+	DPMAC_CNT_EGR_UCAST_FRAME,
+	DPMAC_CNT_EGR_ERR_FRAME,
+	DPMAC_CNT_ING_GOOD_FRAME,
+	DPMAC_CNT_ENG_GOOD_FRAME
+};
+
+int dpmac_get_counter(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      enum dpmac_counter type,
+		      u64 *counter);
+
+/**
+ * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical
+ *		port. This is not used for filtering; the MAC is always in
+ *		promiscuous mode. The address is passed to DPNIs through the
+ *		DPNI API for application use.
+ * @mc_io:	Pointer to opaque I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPMAC object
+ * @addr:	MAC address to set
+ *
+ * Return:	'0' on Success; Error code otherwise.
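+ *
+ * A minimal usage sketch, assuming an open DPMAC @token; the address
+ * bytes are illustrative:
+ *
+ *	const u8 mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
+ *
+ *	err = dpmac_set_port_mac_addr(mc_io, 0, token, mac);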
+ */
+int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
+			    u32 cmd_flags,
+			    u16 token,
+			    const u8 addr[6]);
+
+int dpmac_get_api_version(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 *major_ver,
+			  u16 *minor_ver);
+
+#endif /* __FSL_DPMAC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/mac.c
@@ -0,0 +1,670 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/msi.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+
+#include <uapi/linux/if_bridge.h>
+#include <net/netlink.h>
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+
+#include "../../fsl-mc/include/mc.h"
+#include "../../fsl-mc/include/mc-sys.h"
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+struct dpaa2_mac_priv {
+	struct net_device *netdev;
+	struct fsl_mc_device *mc_dev;
+	struct dpmac_attr attr;
+	struct dpmac_link_state old_state;
+};
+
+/* TODO: fix the 10G modes, mapping can't be right:
+ * XGMII is parallel
+ * XAUI is serial, using 8b/10b encoding
+ * XFI is also serial but using 64b/66b encoding
+ * they can't all map to XGMII...
+ *
+ * This must be kept in sync with enum dpmac_eth_if.
+ */ +static phy_interface_t dpaa2_mac_iface_mode[] = { + PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */ + PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */ + PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */ + PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */ + PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */ + PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */ + PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */ + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */ + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */ +}; + +static void dpaa2_mac_link_changed(struct net_device *netdev) +{ + struct phy_device *phydev; + struct dpmac_link_state state = { 0 }; + struct dpaa2_mac_priv *priv = netdev_priv(netdev); + int err; + + /* the PHY just notified us of link state change */ + phydev = netdev->phydev; + + state.up = !!phydev->link; + if (phydev->link) { + state.rate = phydev->speed; + + if (!phydev->duplex) + state.options |= DPMAC_LINK_OPT_HALF_DUPLEX; + if (phydev->autoneg) + state.options |= DPMAC_LINK_OPT_AUTONEG; + + netif_carrier_on(netdev); + } else { + netif_carrier_off(netdev); + } + + if (priv->old_state.up != state.up || + priv->old_state.rate != state.rate || + priv->old_state.options != state.options) { + priv->old_state = state; + phy_print_status(phydev); + } + + /* We must interrogate MC at all times, because we don't know + * when and whether a potential DPNI may have read the link state. + */ + err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, + priv->mc_dev->mc_handle, &state); + if (unlikely(err)) + dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err); +} + +static int dpaa2_mac_open(struct net_device *netdev) +{ + /* start PHY state machine */ + phy_start(netdev->phydev); + + return 0; +} + +static int dpaa2_mac_stop(struct net_device *netdev) +{ + if (!netdev->phydev) + goto done; + + /* stop PHY state machine */ + phy_stop(netdev->phydev); + + /* signal link down to firmware */ + netdev->phydev->link = 0; + dpaa2_mac_link_changed(netdev); + +done: + return 0; +} + +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS +static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, + struct net_device *dev) +{ + /* we don't support I/O for now, drop the frame */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int dpaa2_mac_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + return phy_ethtool_gset(netdev->phydev, cmd); +} + +static int dpaa2_mac_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + return phy_ethtool_sset(netdev->phydev, cmd); +} + +static struct rtnl_link_stats64 *dpaa2_mac_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *storage) +{ + struct dpaa2_mac_priv *priv = netdev_priv(netdev); + u64 tmp; + int err; + + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_EGR_MCAST_FRAME, + &storage->tx_packets); + if (err) + goto error; + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_EGR_BCAST_FRAME, &tmp); + if (err) + goto error; + storage->tx_packets += tmp; + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_EGR_UCAST_FRAME, &tmp); + if (err) + goto error; + storage->tx_packets += tmp; + + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped); + if (err) + goto error; + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_EGR_BYTE, &storage->tx_bytes); + if (err) + goto error; + err = 
dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors); + if (err) + goto error; + + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets); + if (err) + goto error; + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast); + if (err) + goto error; + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_ING_FRAME_DISCARD, + &storage->rx_dropped); + if (err) + goto error; + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors); + if (err) + goto error; + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_ING_OVERSIZED, &tmp); + if (err) + goto error; + storage->rx_errors += tmp; + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, + DPMAC_CNT_ING_BYTE, &storage->rx_bytes); + if (err) + goto error; + + return storage; +error: + netdev_err(netdev, "dpmac_get_counter err %d\n", err); + return storage; +} + +static struct { + enum dpmac_counter id; + char name[ETH_GSTRING_LEN]; +} dpaa2_mac_counters[] = { + {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"}, + {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"}, + {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"}, + {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"}, + {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"}, + {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"}, + {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"}, + {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"}, + {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"}, + {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"}, + {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"}, + {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"}, + {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"}, + {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"}, + {DPMAC_CNT_ING_FRAG, "rx frags"}, + {DPMAC_CNT_ING_JABBER, "rx jabber"}, + {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"}, + {DPMAC_CNT_ING_OVERSIZED, "rx oversized"}, + {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"}, + {DPMAC_CNT_ING_BYTE, "rx bytes"}, + {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"}, + {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"}, + {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"}, + {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"}, + {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"}, + {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"}, + {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"}, + {DPMAC_CNT_EGR_BYTE, "tx bytes"}, + +}; + +static void dpaa2_mac_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) + memcpy(data + i * ETH_GSTRING_LEN, + dpaa2_mac_counters[i].name, + ETH_GSTRING_LEN); + break; + } +} + +static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct dpaa2_mac_priv *priv = netdev_priv(netdev); + int i; + int err; + + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) { + err = dpmac_get_counter(priv->mc_dev->mc_io, + 0, + priv->mc_dev->mc_handle, + dpaa2_mac_counters[i].id, &data[i]); + if (err) + netdev_err(netdev, "dpmac_get_counter[%s] err %d\n", + dpaa2_mac_counters[i].name, err); + } +} + +static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(dpaa2_mac_counters); + default: + return -EOPNOTSUPP; + } +} + +static const 
struct net_device_ops dpaa2_mac_ndo_ops = {
+	.ndo_open = &dpaa2_mac_open,
+	.ndo_stop = &dpaa2_mac_stop,
+	.ndo_start_xmit = &dpaa2_mac_drop_frame,
+	.ndo_get_stats64 = &dpaa2_mac_get_stats,
+};
+
+static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
+	.get_settings = &dpaa2_mac_get_settings,
+	.set_settings = &dpaa2_mac_set_settings,
+	.get_strings = &dpaa2_mac_get_strings,
+	.get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
+	.get_sset_count = &dpaa2_mac_get_sset_count,
+};
+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
+
+static void configure_link(struct dpaa2_mac_priv *priv,
+			   struct dpmac_link_cfg *cfg)
+{
+	struct phy_device *phydev = priv->netdev->phydev;
+
+	if (unlikely(!phydev))
+		return;
+
+	phydev->speed = cfg->rate;
+	/* phydev->duplex: 0 = half, 1 = full */
+	phydev->duplex = !(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
+
+	if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
+		phydev->autoneg = 1;
+		phydev->advertising |= ADVERTISED_Autoneg;
+	} else {
+		phydev->autoneg = 0;
+		phydev->advertising &= ~ADVERTISED_Autoneg;
+	}
+
+	phy_start_aneg(phydev);
+}
+
+static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
+{
+	struct device *dev = (struct device *)arg;
+	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+	struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
+	struct dpmac_link_cfg link_cfg;
+	u32 status = ~0;
+	int err;
+
+	err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				   DPMAC_IRQ_INDEX, &status);
+	if (unlikely(err || !status))
+		return IRQ_NONE;
+
+	/* DPNI-initiated link configuration; 'ifconfig up' also calls this */
+	if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
+		err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
+					 &link_cfg);
+		if (unlikely(err))
+			goto out;
+
+		configure_link(priv, &link_cfg);
+	}
+
+out:
+	dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+			       DPMAC_IRQ_INDEX, status);
+
+	return IRQ_HANDLED;
+}
+
+static int setup_irqs(struct fsl_mc_device *mc_dev)
+{
+	int err = 0;
+	struct fsl_mc_device_irq *irq;
+
+	err = fsl_mc_allocate_irqs(mc_dev);
+	if (err) {
+		dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
+		return err;
+	}
+
+	irq = mc_dev->irqs[0];
+	err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq,
+					NULL, &dpaa2_mac_irq_handler,
+					IRQF_NO_SUSPEND | IRQF_ONESHOT,
+					dev_name(&mc_dev->dev), &mc_dev->dev);
+	if (err) {
+		dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
+			err);
+		goto free_irq;
+	}
+
+	err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				 DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ);
+	if (err) {
+		dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
+		goto free_irq;
+	}
+	err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				   DPMAC_IRQ_INDEX, 1);
+	if (err) {
+		dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
+		goto free_irq;
+	}
+
+	return 0;
+
+free_irq:
+	fsl_mc_free_irqs(mc_dev);
+
+	return err;
+}
+
+static void teardown_irqs(struct fsl_mc_device *mc_dev)
+{
+	int err;
+
+	err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+				   DPMAC_IRQ_INDEX, 0);
+	if (err)
+		dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
+
+	fsl_mc_free_irqs(mc_dev);
+}
+
+static struct device_node *find_dpmac_node(struct device *dev, u16 dpmac_id)
+{
+	struct device_node *dpmacs, *dpmac = NULL;
+	struct device_node *mc_node = dev->of_node;
+	u32 id;
+	int err;
+
+	dpmacs = of_find_node_by_name(mc_node, "dpmacs");
+	if (!dpmacs) {
+		dev_err(dev, "No dpmacs subnode in device-tree\n");
+		return NULL;
+	}
+
+	while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
+		err = of_property_read_u32(dpmac, "reg", &id);
+		if (err)
+			continue;
+		if (id == dpmac_id)
+			return dpmac;
+	}
+
+	return NULL;
+}
+
+static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
+{
+	struct device *dev;
+	struct dpaa2_mac_priv *priv = NULL;
+	struct device_node *phy_node, *dpmac_node;
+	struct net_device *netdev;
+	phy_interface_t if_mode;
+	int err = 0;
+
+	dev = &mc_dev->dev;
+
+	/* prepare a net_dev structure to make the phy lib API happy */
+	netdev = alloc_etherdev(sizeof(*priv));
+	if (!netdev) {
+		dev_err(dev, "alloc_etherdev error\n");
+		err = -ENOMEM;
+		goto err_exit;
+	}
+	priv = netdev_priv(netdev);
+	priv->mc_dev = mc_dev;
+	priv->netdev = netdev;
+
+	SET_NETDEV_DEV(netdev, dev);
+
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+	snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
+#endif
+
+	dev_set_drvdata(dev, priv);
+
+	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+	if (err || !mc_dev->mc_io) {
+		dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
+		err = -ENODEV;
+		goto err_free_netdev;
+	}
+
+	err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+			 &mc_dev->mc_handle);
+	if (err || !mc_dev->mc_handle) {
+		dev_err(dev, "dpmac_open error: %d\n", err);
+		err = -ENODEV;
+		goto err_free_mcp;
+	}
+
+	err = dpmac_get_attributes(mc_dev->mc_io, 0,
+				   mc_dev->mc_handle, &priv->attr);
+	if (err) {
+		dev_err(dev, "dpmac_get_attributes err %d\n", err);
+		err = -EINVAL;
+		goto err_close;
+	}
+
+	/* Look up the DPMAC node in the device-tree. */
+	dpmac_node = find_dpmac_node(dev, priv->attr.id);
+	if (!dpmac_node) {
+		dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
+		err = -ENODEV;
+		goto err_close;
+	}
+
+	err = setup_irqs(mc_dev);
+	if (err) {
+		err = -EFAULT;
+		goto err_close;
+	}
+
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+	/* OPTIONAL, register netdev just to make it visible to the user */
+	netdev->netdev_ops = &dpaa2_mac_ndo_ops;
+	netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
+
+	/* phy starts up enabled so netdev should be up too */
+	netdev->flags |= IFF_UP;
+
+	err = register_netdev(priv->netdev);
+	if (err < 0) {
+		dev_err(dev, "register_netdev error %d\n", err);
+		err = -ENODEV;
+		goto err_free_irq;
+	}
+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
+
+	/* connect to the PHY if a phy-handle is defined in the device tree;
+	 * otherwise fall back to registering a fixed link
+	 */
+	phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
+	if (!phy_node)
+		goto probe_fixed_link;
+
+	if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
+		if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
+		dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
+			phy_modes(if_mode), priv->attr.eth_if);
+	} else {
+		dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n",
+			 priv->attr.eth_if);
+		goto probe_fixed_link;
+	}
+
+	/* try to connect to the PHY */
+	netdev->phydev = of_phy_connect(netdev, phy_node,
+					&dpaa2_mac_link_changed, 0, if_mode);
+	if (!netdev->phydev) {
+		/* No need for dev_err(); the kernel's loud enough as it is. */
+		dev_dbg(dev, "Can't of_phy_connect() now.\n");
+		/* We might be waiting for the MDIO MUX to probe, so defer
+		 * our own probing.
+ */ + err = -EPROBE_DEFER; + goto err_defer; + } + dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode)); + +probe_fixed_link: + if (!netdev->phydev) { + struct fixed_phy_status status = { + .link = 1, + /* fixed-phys don't support 10Gbps speed for now */ + .speed = 1000, + .duplex = 1, + }; + + /* try to register a fixed link phy */ + netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1, + NULL); + if (!netdev->phydev || IS_ERR(netdev->phydev)) { + dev_err(dev, "error trying to register fixed PHY\n"); + /* So we don't crash unregister_netdev() later on */ + netdev->phydev = NULL; + err = -EFAULT; + goto err_no_phy; + } + dev_info(dev, "Registered fixed PHY.\n"); + } + + dpaa2_mac_open(netdev); + + return 0; + +err_defer: +err_no_phy: +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS + unregister_netdev(netdev); +err_free_irq: +#endif + teardown_irqs(mc_dev); +err_close: + dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle); +err_free_mcp: + fsl_mc_portal_free(mc_dev->mc_io); +err_free_netdev: + free_netdev(netdev); +err_exit: + return err; +} + +static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev) +{ + struct device *dev = &mc_dev->dev; + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); + struct net_device *netdev = priv->netdev; + + dpaa2_mac_stop(netdev); + + if (phy_is_pseudo_fixed_link(netdev->phydev)) + fixed_phy_unregister(netdev->phydev); + else + phy_disconnect(netdev->phydev); + netdev->phydev = NULL; + +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS + unregister_netdev(priv->netdev); +#endif + teardown_irqs(priv->mc_dev); + dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle); + fsl_mc_portal_free(priv->mc_dev->mc_io); + free_netdev(priv->netdev); + + dev_set_drvdata(dev, NULL); + + return 0; +} + +static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dpmac", + }, + { .vendor = 0x0 } +}; +MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table); + +static struct fsl_mc_driver dpaa2_mac_drv = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = dpaa2_mac_probe, + .remove = dpaa2_mac_remove, + .match_id_table = dpaa2_mac_match_id_table, +}; + +module_fsl_mc_driver(dpaa2_mac_drv); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver"); --- /dev/null +++ b/drivers/staging/fsl-dpaa2/rtc/Makefile @@ -0,0 +1,10 @@ + +obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += dpaa2-rtc.o + +dpaa2-rtc-objs := rtc.o dprtc.o + +all: + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules + +clean: + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean --- /dev/null +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h @@ -0,0 +1,160 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _FSL_DPRTC_CMD_H +#define _FSL_DPRTC_CMD_H + +/* DPRTC Version */ +#define DPRTC_VER_MAJOR 2 +#define DPRTC_VER_MINOR 0 + +/* Command versioning */ +#define DPRTC_CMD_BASE_VERSION 1 +#define DPRTC_CMD_ID_OFFSET 4 + +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800) +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810) +#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910) +#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990) +#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10) + +#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002) +#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003) +#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004) +#define DPRTC_CMDID_RESET DPRTC_CMD(0x005) +#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006) + +#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012) +#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013) +#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014) +#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015) +#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016) +#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017) + +#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0) +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1) +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2) +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3) +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4) +#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5) +#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6) +#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7) +#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8) +#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9) +#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPRTC_MASK(field) \ + GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \ + DPRTC_##field##_SHIFT) +#define dprtc_get_field(var, field) \ + (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT) + +#pragma pack(push, 1) +struct dprtc_cmd_open { + uint32_t dprtc_id; +}; + +struct dprtc_cmd_destroy { + uint32_t object_id; +}; + +#define DPRTC_ENABLE_SHIFT 0 +#define DPRTC_ENABLE_SIZE 1 + +struct dprtc_rsp_is_enabled { + uint8_t en; +}; + +struct dprtc_cmd_get_irq { + uint32_t pad; + uint8_t irq_index; +}; + +struct dprtc_cmd_set_irq_enable { + uint8_t en; + uint8_t pad[3]; + uint8_t irq_index; +}; + +struct dprtc_rsp_get_irq_enable { + uint8_t en; +}; + +struct dprtc_cmd_set_irq_mask { + 
uint32_t mask; + uint8_t irq_index; +}; + +struct dprtc_rsp_get_irq_mask { + uint32_t mask; +}; + +struct dprtc_cmd_get_irq_status { + uint32_t status; + uint8_t irq_index; +}; + +struct dprtc_rsp_get_irq_status { + uint32_t status; +}; + +struct dprtc_cmd_clear_irq_status { + uint32_t status; + uint8_t irq_index; +}; + +struct dprtc_rsp_get_attributes { + uint32_t pad; + uint32_t id; +}; + +struct dprtc_cmd_set_clock_offset { + uint64_t offset; +}; + +struct dprtc_get_freq_compensation { + uint32_t freq_compensation; +}; + +struct dprtc_time { + uint64_t time; +}; + +struct dprtc_rsp_get_api_version { + uint16_t major; + uint16_t minor; +}; +#pragma pack(pop) +#endif /* _FSL_DPRTC_CMD_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c @@ -0,0 +1,746 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include "../../fsl-mc/include/mc-sys.h" +#include "../../fsl-mc/include/mc-cmd.h" +#include "dprtc.h" +#include "dprtc-cmd.h" + +/** + * dprtc_open() - Open a control session for the specified object. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dprtc_id: DPRTC unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dprtc_create function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object + * + * Return: '0' on Success; Error code otherwise. 
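+ *
+ * A minimal usage sketch, assuming an MC portal @mc_io and a DPRTC
+ * object with id 0:
+ *
+ *	uint16_t token;
+ *
+ *	err = dprtc_open(mc_io, 0, 0, &token);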
+ */ +int dprtc_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dprtc_id, + uint16_t *token) +{ + struct dprtc_cmd_open *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dprtc_cmd_open *)cmd.params; + cmd_params->dprtc_id = cpu_to_le32(dprtc_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return err; +} + +/** + * dprtc_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_create() - Create the DPRTC object. + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: Returned object id + * + * Create the DPRTC object, allocate required resources and + * perform required initialization. + * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dprtc_cfg *cfg, + uint32_t *obj_id) +{ + struct mc_command cmd = { 0 }; + int err; + + (void)(cfg); /* unused */ + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE, + cmd_flags, + dprc_token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *obj_id = mc_cmd_read_object_id(&cmd); + + return 0; +} + +/** + * dprtc_destroy() - Destroy the DPRTC object and release all its resources. + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @object_id: The object id; it must be a valid id within the container that + * created this object; + * + * The function accepts the authentication token of the parent container that + * created the object (not the one that currently owns the object). The object + * is searched within parent using the provided 'object_id'. + * All tokens to the object must be closed before calling destroy. + * + * Return: '0' on Success; error code otherwise. 
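+ *
+ * A minimal usage sketch, assuming all tokens were closed beforehand:
+ *
+ *	err = dprtc_destroy(mc_io, dprc_token, 0, object_id);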
+ */
+int dprtc_destroy(struct fsl_mc_io *mc_io,
+		  uint16_t dprc_token,
+		  uint32_t cmd_flags,
+		  uint32_t object_id)
+{
+	struct dprtc_cmd_destroy *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
+					  cmd_flags,
+					  dprc_token);
+	cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
+	cmd_params->object_id = cpu_to_le32(object_id);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+int dprtc_enable(struct fsl_mc_io *mc_io,
+		 uint32_t cmd_flags,
+		 uint16_t token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+int dprtc_disable(struct fsl_mc_io *mc_io,
+		  uint32_t cmd_flags,
+		  uint16_t token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+int dprtc_is_enabled(struct fsl_mc_io *mc_io,
+		     uint32_t cmd_flags,
+		     uint16_t token,
+		     int *en)
+{
+	struct dprtc_rsp_is_enabled *rsp_params;
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
+	*en = dprtc_get_field(rsp_params->en, ENABLE);
+
+	return 0;
+}
+
+int dprtc_reset(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token)
+{
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @irq_index:	The interrupt index to configure
+ * @en:	Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no causes will
+ * result in an interrupt.
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+			 uint32_t cmd_flags,
+			 uint16_t token,
+			 uint8_t irq_index,
+			 uint8_t en)
+{
+	struct dprtc_cmd_set_irq_enable *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
+	cmd_params->irq_index = irq_index;
+	cmd_params->en = en;
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_irq_enable() - Get overall interrupt state
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @irq_index:	The interrupt index to configure
+ * @en:	Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return:	'0' on Success; Error code otherwise.
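+ *
+ * A minimal usage sketch, assuming an open DPRTC @token and IRQ index 0:
+ *
+ *	uint8_t en;
+ *
+ *	err = dprtc_get_irq_enable(mc_io, 0, token, 0, &en);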
+ */ +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t *en) +{ + struct dprtc_rsp_get_irq_enable *rsp_params; + struct dprtc_cmd_get_irq *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params; + *en = rsp_params->en; + + return 0; +} + +/** + * dprtc_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @irq_index: The interrupt index to configure + * @mask: Event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t mask) +{ + struct dprtc_cmd_set_irq_mask *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_get_irq_mask() - Get interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @irq_index: The interrupt index to configure + * @mask: Returned event mask to trigger interrupt + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *mask) +{ + struct dprtc_rsp_get_irq_mask *rsp_params; + struct dprtc_cmd_get_irq *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params; + *mask = le32_to_cpu(rsp_params->mask); + + return 0; +} + +/** + * dprtc_get_irq_status() - Get the current status of any pending interrupts. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. 
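+ *
+ * A minimal usage sketch pairing the status read with a W1C clear,
+ * assuming an open DPRTC @token and IRQ index 0:
+ *
+ *	uint32_t status = ~0;
+ *
+ *	err = dprtc_get_irq_status(mc_io, 0, token, 0, &status);
+ *	if (!err && status)
+ *		err = dprtc_clear_irq_status(mc_io, 0, token, 0, status);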
+ */
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+			 uint32_t cmd_flags,
+			 uint16_t token,
+			 uint8_t irq_index,
+			 uint32_t *status)
+{
+	struct dprtc_cmd_get_irq_status *cmd_params;
+	struct dprtc_rsp_get_irq_status *rsp_params;
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
+	cmd_params->status = cpu_to_le32(*status);
+	cmd_params->irq_index = irq_index;
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
+	*status = le32_to_cpu(rsp_params->status);
+
+	return 0;
+}
+
+/**
+ * dprtc_clear_irq_status() - Clear a pending interrupt's status
+ *
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @irq_index:	The interrupt index to configure
+ * @status:	Bits to clear (W1C) - one bit per cause:
+ *		0 = don't change
+ *		1 = clear status bit
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+			   uint32_t cmd_flags,
+			   uint16_t token,
+			   uint8_t irq_index,
+			   uint32_t status)
+{
+	struct dprtc_cmd_clear_irq_status *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
+	cmd_params->irq_index = irq_index;
+	cmd_params->status = cpu_to_le32(status);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_attributes - Retrieve DPRTC attributes.
+ *
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @attr:	Returned object's attributes
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_get_attributes(struct fsl_mc_io *mc_io,
+			 uint32_t cmd_flags,
+			 uint16_t token,
+			 struct dprtc_attr *attr)
+{
+	struct dprtc_rsp_get_attributes *rsp_params;
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
+					  cmd_flags,
+					  token);
+
+	/* send command to mc*/
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
+	attr->id = le32_to_cpu(rsp_params->id);
+
+	return 0;
+}
+
+/**
+ * dprtc_set_clock_offset() - Sets the clock's offset
+ * (usually relative to another clock).
+ *
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPRTC object
+ * @offset:	New clock offset (in nanoseconds).
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
+			   uint32_t cmd_flags,
+			   uint16_t token,
+			   int64_t offset)
+{
+	struct dprtc_cmd_set_clock_offset *cmd_params;
+	struct mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
+					  cmd_flags,
+					  token);
+	cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
+	cmd_params->offset = cpu_to_le64(offset);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
+ * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: The new frequency compensation value to set. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t freq_compensation) +{ + struct dprtc_get_freq_compensation *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION, + cmd_flags, + token); + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params; + cmd_params->freq_compensation = cpu_to_le32(freq_compensation); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: Frequency compensation value + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint32_t *freq_compensation) +{ + struct dprtc_get_freq_compensation *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params; + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation); + + return 0; +} + +/** + * dprtc_get_time() - Returns the current RTC time. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: Current RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_time(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t *time) +{ + struct dprtc_time *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dprtc_time *)cmd.params; + *time = le64_to_cpu(rsp_params->time); + + return 0; +} + +/** + * dprtc_set_time() - Updates current RTC time. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: New RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_time(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t time) +{ + struct dprtc_time *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME, + cmd_flags, + token); + cmd_params = (struct dprtc_time *)cmd.params; + cmd_params->time = cpu_to_le64(time); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_set_alarm() - Defines and sets alarm. 
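+ *
+ * Expiry is reported through the DPRTC_EVENT_ALARM interrupt cause
+ * defined in dprtc.h; enable and unmask that cause with
+ * dprtc_set_irq_enable() and dprtc_set_irq_mask() to receive it.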
+ * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: In nanoseconds, the time when the alarm + * should go off - must be a multiple of + * 1 microsecond + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_alarm(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, uint64_t time) +{ + struct dprtc_time *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM, + cmd_flags, + token); + cmd_params = (struct dprtc_time *)cmd.params; + cmd_params->time = cpu_to_le64(time); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_get_api_version() - Get Data Path Real Time Counter API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path real time counter API + * @minor_ver: Minor version of data path real time counter API + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver) +{ + struct dprtc_rsp_get_api_version *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION, + cmd_flags, + 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} --- /dev/null +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h @@ -0,0 +1,172 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the above-listed copyright holders nor the + * names of any contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPRTC_H
+#define __FSL_DPRTC_H
+
+/* Data Path Real Time Counter API
+ * Contains initialization APIs and runtime control APIs for RTC
+ */
+
+struct fsl_mc_io;
+
+/**
+ * Number of IRQs
+ */
+#define DPRTC_MAX_IRQ_NUM	1
+#define DPRTC_IRQ_INDEX		0
+
+/**
+ * Interrupt event masks:
+ */
+
+/**
+ * Interrupt event mask indicating an alarm event has occurred
+ */
+#define DPRTC_EVENT_ALARM	0x40000000
+/**
+ * Interrupt event mask indicating a periodic pulse event has occurred
+ */
+#define DPRTC_EVENT_PPS		0x08000000
+
+int dprtc_open(struct fsl_mc_io *mc_io,
+	       uint32_t cmd_flags,
+	       int dprtc_id,
+	       uint16_t *token);
+
+int dprtc_close(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token);
+
+/**
+ * struct dprtc_cfg - Structure representing DPRTC configuration
+ * @options: placeholder
+ */
+struct dprtc_cfg {
+	uint32_t options;
+};
+
+int dprtc_create(struct fsl_mc_io *mc_io,
+		 uint16_t dprc_token,
+		 uint32_t cmd_flags,
+		 const struct dprtc_cfg *cfg,
+		 uint32_t *obj_id);
+
+int dprtc_destroy(struct fsl_mc_io *mc_io,
+		  uint16_t dprc_token,
+		  uint32_t cmd_flags,
+		  uint32_t object_id);
+
+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
+			   uint32_t cmd_flags,
+			   uint16_t token,
+			   int64_t offset);
+
+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
+				uint32_t cmd_flags,
+				uint16_t token,
+				uint32_t freq_compensation);
+
+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
+				uint32_t cmd_flags,
+				uint16_t token,
+				uint32_t *freq_compensation);
+
+int dprtc_get_time(struct fsl_mc_io *mc_io,
+		   uint32_t cmd_flags,
+		   uint16_t token,
+		   uint64_t *time);
+
+int dprtc_set_time(struct fsl_mc_io *mc_io,
+		   uint32_t cmd_flags,
+		   uint16_t token,
+		   uint64_t time);
+
+int dprtc_set_alarm(struct fsl_mc_io *mc_io,
+		    uint32_t cmd_flags,
+		    uint16_t token,
+		    uint64_t time);
+
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+			 uint32_t cmd_flags,
+			 uint16_t token,
+			 uint8_t irq_index,
+			 uint8_t en);
+
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+			 uint32_t cmd_flags,
+			 uint16_t token,
+			 uint8_t irq_index,
+			 uint8_t *en);
+
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+		       uint32_t cmd_flags,
+		       uint16_t token,
+		       uint8_t irq_index,
+		       uint32_t mask);
+
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+		       uint32_t cmd_flags,
+		       uint16_t token,
+		       uint8_t irq_index,
+		       uint32_t *mask);
+
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+			 uint32_t cmd_flags,
+			 uint16_t token,
+			 uint8_t irq_index,
+			 uint32_t *status);
+
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+			   uint32_t cmd_flags,
+			   uint16_t token,
+			   uint8_t irq_index,
+			   uint32_t status);
+
+/**
+ * struct dprtc_attr - Structure representing DPRTC attributes
+ * @id: DPRTC object ID
+ */
+struct dprtc_attr {
+	int id;
+};
+
+int dprtc_get_attributes(struct fsl_mc_io *mc_io,
+			 uint32_t cmd_flags,
+			 uint16_t token,
+			 struct dprtc_attr *attr);
+
+int dprtc_get_api_version(struct fsl_mc_io *mc_io,
+			  uint32_t cmd_flags,
+			  uint16_t *major_ver,
+			  uint16_t *minor_ver);
+
+#endif /* __FSL_DPRTC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
@@ -0,0 +1,243 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "../../fsl-mc/include/mc.h"
+#include "../../fsl-mc/include/mc-sys.h"
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+#define N_EXT_TS 2
+
+/* A single DPRTC instance is supported, tracked in module-scope state */
+static struct ptp_clock *clock;
+static struct fsl_mc_device *rtc_mc_dev;
+static u32 freq_compensation;
+
+/* PTP clock operations */
+static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 adj;
+	u32 diff, tmr_add;
+	int neg_adj = 0;
+	int err = 0;
+	struct fsl_mc_device *mc_dev = rtc_mc_dev;
+	struct device *dev = &mc_dev->dev;
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+
+	/* Scale the current addend by |ppb| parts per billion and move the
+	 * addend up or down by that amount.
+	 */
+	tmr_add = freq_compensation;
+	adj = tmr_add;
+	adj *= ppb;
+	diff = div_u64(adj, 1000000000ULL);
+
+	tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+
+	err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
+					  mc_dev->mc_handle, tmr_add);
+	if (err)
+		dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
+	return err;
+}
+
+static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	u64 now;
+	int err = 0;
+	struct fsl_mc_device *mc_dev = rtc_mc_dev;
+	struct device *dev = &mc_dev->dev;
+
+	err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
+	if (err) {
+		dev_err(dev, "dprtc_get_time err %d\n", err);
+		return err;
+	}
+
+	now += delta;
+
+	err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
+	if (err) {
+		dev_err(dev, "dprtc_set_time err %d\n", err);
+		return err;
+	}
+	return 0;
+}
+
+static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp,
+			     struct timespec64 *ts)
+{
+	u64 ns;
+	u32 remainder;
+	int err = 0;
+	struct fsl_mc_device *mc_dev = rtc_mc_dev;
+	struct device *dev = &mc_dev->dev;
+
+	err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
+	if (err) {
+		dev_err(dev, "dprtc_get_time err %d\n", err);
+		return err;
+	}
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+	ts->tv_nsec = remainder;
+	return 0;
+}
+
+static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
+			     const struct timespec64 *ts)
+{
+	u64 ns;
+	int err = 0;
+	struct fsl_mc_device *mc_dev = rtc_mc_dev;
+	struct device *dev = &mc_dev->dev;
+
+	ns = ts->tv_sec * 1000000000ULL;
+	ns += ts->tv_nsec;
+
+	err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
+	if (err)
+		dev_err(dev, "dprtc_set_time err %d\n", err);
+	return err;
+}
+
+static struct ptp_clock_info ptp_dpaa2_caps = {
+	.owner = THIS_MODULE,
+	.name = "dpaa2 clock",
+	.max_adj = 512000,
+	.n_alarm = 0,
+	.n_ext_ts = N_EXT_TS,
+	.n_per_out = 0,
+	.n_pins = 0,
+	.pps = 1,
+	.adjfreq = ptp_dpaa2_adjfreq,
+	.adjtime = ptp_dpaa2_adjtime,
+	.gettime64 = ptp_dpaa2_gettime,
+	.settime64 = ptp_dpaa2_settime,
+};
+
+static int rtc_probe(struct fsl_mc_device *mc_dev)
+{
+	struct device *dev;
+	int err = 0;
+	int dpaa2_phc_index;
+	u32 tmr_add = 0;
+
+	if (!mc_dev)
+		return -EFAULT;
+
+	dev = &mc_dev->dev;
+
+	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+	if (unlikely(err)) {
+		dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+		goto err_exit;
+	}
+	if (!mc_dev->mc_io) {
+		dev_err(dev,
+			"fsl_mc_portal_allocate returned null handle but no error\n");
+		err = -EFAULT;
+		goto err_exit;
+	}
+
+	err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+			 &mc_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dprtc_open err %d\n", err);
+		goto err_free_mcp;
+	}
+	if (!mc_dev->mc_handle) {
+		dev_err(dev, "dprtc_open returned null handle but no error\n");
+		err = -EFAULT;
+		goto err_free_mcp;
+	}
+
+	rtc_mc_dev = mc_dev;
+
+	err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
+					  mc_dev->mc_handle, &tmr_add);
+	if (err) {
+		dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
+		goto err_close;
+	}
+	freq_compensation = tmr_add;
+
+	clock = ptp_clock_register(&ptp_dpaa2_caps, dev);
+	if (IS_ERR(clock)) {
+		err = PTR_ERR(clock);
+		goto err_close;
+	}
+	/* PHC index of the registered clock; not yet exported to consumers */
+	dpaa2_phc_index = ptp_clock_index(clock);
+
+	return 0;
+err_close:
+	dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+err_free_mcp:
+	fsl_mc_portal_free(mc_dev->mc_io);
+err_exit:
+	return err;
+}
+
+static int rtc_remove(struct fsl_mc_device *mc_dev)
+{
+	ptp_clock_unregister(clock);
+	dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+	fsl_mc_portal_free(mc_dev->mc_io);
+
+	return 0;
+}
+
+static const struct fsl_mc_device_id rtc_match_id_table[] = {
+	{
+		.vendor = FSL_MC_VENDOR_FREESCALE,
+		.obj_type = "dprtc",
+	},
+	{}
+};
+
+static struct fsl_mc_driver rtc_drv = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = rtc_probe,
+	.remove = rtc_remove,
+	.match_id_table = rtc_match_id_table,
+};
+
+module_fsl_mc_driver(rtc_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");
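
As a reading aid for the API added above (not part of the patch): a minimal,
hypothetical consumer showing the open/use/close discipline that dprtc.h
implies. example_set_rtc() and its parameters are placeholders; only the
dprtc_* calls declared in dprtc.h are real.

	#include "dprtc.h"

	/* mc_io and dprtc_id would come from the fsl-mc bus in practice. */
	static int example_set_rtc(struct fsl_mc_io *mc_io, int dprtc_id,
				   uint64_t ns)
	{
		uint16_t token;
		int err;

		/* Obtain an authentication token for the DPRTC object. */
		err = dprtc_open(mc_io, 0, dprtc_id, &token);
		if (err)
			return err;

		/* Program the free-running counter, in nanoseconds. */
		err = dprtc_set_time(mc_io, 0, token, ns);

		/* Release the token whether or not set_time succeeded. */
		dprtc_close(mc_io, 0, token);
		return err;
	}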