author	Yangbo Lu <yangbo.lu@nxp.com>	2017-09-27 15:31:31 +0800
committer	John Crispin <john@phrozen.org>	2017-10-07 23:13:23 +0200
commit	8fdda1cc1033e2bd0d048188af5167faffbf9b38 (patch)
tree	be12aa762b013e30476e2bd784398a0b802bbc71 /target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch
parent	19951bbf57da87093f7bde25bad41571fbdaf4d9 (diff)
layerscape: add linux 4.9 support
This patch adds Linux 4.9 support for layerscape.
All these kernel patches are from the NXP LSDK 1709 release.
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Diffstat (limited to 'target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch')
-rw-r--r--	target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch	3781
1 file changed, 3781 insertions, 0 deletions
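For context: the driver added below registers ordinary dmaengine channels (DMA_MEMCPY, DMA_SG, DMA_PRIVATE), so a kernel client exercises it through the standard dmaengine flow. The following is a minimal illustrative sketch, not part of the patch, assuming a 4.9-era kernel with the dmaengine_prep_dma_memcpy() helper; the function and callback names (demo_memcpy, demo_cb) are hypothetical.

/*
 * Illustrative dmaengine client sketch (not part of this commit).
 * Requests any DMA_MEMCPY-capable channel -- such as the qDMA
 * channels this driver registers -- and runs one copy to completion.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>

static void demo_cb(void *arg)
{
	complete(arg);	/* signal the waiter from the DMA completion callback */
}

static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);	/* any memcpy channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}
	tx->callback = demo_cb;
	tx->callback_param = &done;
	cookie = dmaengine_submit(tx);		/* queue on the virtual channel */
	dma_async_issue_pending(chan);		/* ends up in dpaa2_qdma_issue_pending() */

	wait_for_completion(&done);
	dma_release_channel(chan);
	return dma_submit_error(cookie) ? -EIO : 0;
}

Because the engine sets DMA_PRIVATE, dma_request_channel() hands the caller an exclusively allocated channel; completion is reported back through the FQDAN callback path that the patch wires up below.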
diff --git a/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch b/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch
new file mode 100644
index 0000000000..29fc301abd
--- /dev/null
+++ b/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch
@@ -0,0 +1,3781 @@
+From 659603c5f6cbc3d39922d4374df25ae4627d0e88 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 25 Sep 2017 12:12:20 +0800
+Subject: [PATCH] dma: support layerscape
+
+This is an integrated patch for layerscape dma support.
+
+Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/dma/Kconfig | 14 +
+ drivers/dma/Makefile | 2 +
+ drivers/dma/dpaa2-qdma/Kconfig | 8 +
+ drivers/dma/dpaa2-qdma/Makefile | 8 +
+ drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 +++++++++++++++++++++++++
+ drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 262 +++++++
+ drivers/dma/dpaa2-qdma/dpdmai.c | 454 ++++++++++++
+ drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++++++
+ drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++++
+ drivers/dma/fsl-qdma.c | 1201 +++++++++++++++++++++++++++++++
+ 10 files changed, 3678 insertions(+)
+ create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
+ create mode 100644 drivers/dma/dpaa2-qdma/Makefile
+ create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+ create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
+ create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
+ create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
+ create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
+ create mode 100644 drivers/dma/fsl-qdma.c
+
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 141aefbe..e5b0fb0b 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -192,6 +192,20 @@ config FSL_EDMA
+	  multiplexing capability for DMA request sources(slot).
+	  This module can be found on Freescale Vybrid and LS-1 SoCs.
+
++config FSL_QDMA
++	tristate "Freescale qDMA engine support"
++	select DMA_ENGINE
++	select DMA_VIRTUAL_CHANNELS
++	select DMA_ENGINE_RAID
++	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
++	help
++	  Support the Freescale qDMA engine with command queue and legacy mode.
++	  Channel virtualization is supported through enqueuing of DMA jobs to,
++	  or dequeuing DMA jobs from, different work queues.
++	  This module can be found on Freescale LS SoCs.
++
++source drivers/dma/dpaa2-qdma/Kconfig
++
+ config FSL_RAID
+	tristate "Freescale RAID engine Support"
+	depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
+index e4dc9cac..1226cbb4 100644
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
+ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+ obj-$(CONFIG_FSL_DMA) += fsldma.o
+ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
++obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
++obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
+ obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+ obj-$(CONFIG_HSU_DMA) += hsu/
+ obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+diff --git a/drivers/dma/dpaa2-qdma/Kconfig b/drivers/dma/dpaa2-qdma/Kconfig
+new file mode 100644
+index 00000000..084e34bf
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/Kconfig
+@@ -0,0 +1,8 @@
++menuconfig FSL_DPAA2_QDMA
++	tristate "NXP DPAA2 QDMA"
++	depends on FSL_MC_BUS && FSL_MC_DPIO
++	select DMA_ENGINE
++	select DMA_VIRTUAL_CHANNELS
++	---help---
++	  NXP Data Path Acceleration Architecture 2 QDMA driver,
++	  using the NXP MC bus driver.
+diff --git a/drivers/dma/dpaa2-qdma/Makefile b/drivers/dma/dpaa2-qdma/Makefile
+new file mode 100644
+index 00000000..ba599ac6
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/Makefile
+@@ -0,0 +1,8 @@
++#
++# Makefile for the NXP DPAA2 qDMA controllers
++#
++ccflags-y += -DVERSION=\"\"
++
++obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
++
++fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
+diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+new file mode 100644
+index 00000000..ad6b03f7
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+@@ -0,0 +1,986 @@
++/*
++ * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
++ *
++ * Copyright 2015-2017 NXP Semiconductor, Inc.
++ * Author: Changming Huang <jerry.huang@nxp.com>
++ *
++ * Driver for the NXP QDMA engine with QMan mode.
++ * Channel virtualization is supported through enqueuing of DMA jobs to,
++ * or dequeuing DMA jobs from different work queues with QMan portal.
++ * This module can be found on NXP LS2 SoCs.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/clk.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of_dma.h>
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/iommu.h>
++
++#include "../virt-dma.h"
++
++#include "../../../drivers/staging/fsl-mc/include/mc.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
++#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
++#include "fsl_dpdmai_cmd.h"
++#include "fsl_dpdmai.h"
++#include "dpaa2-qdma.h"
++
++static bool smmu_disable = true;
++
++static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
++{
++	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
++}
++
++static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
++{
++	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
++}
++
++static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
++{
++	return 0;
++}
++
++static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
++{
++	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++	unsigned long flags;
++	LIST_HEAD(head);
++
++	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
++	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
++	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
++
++	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
++}
++
++/*
++ * Request a command descriptor for enqueue.
++ */
++static struct dpaa2_qdma_comp *
++dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
++{
++	struct dpaa2_qdma_comp *comp_temp = NULL;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
++	if (list_empty(&dpaa2_chan->comp_free)) {
++		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
++		if (!comp_temp)
++			goto err;
++		comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
++				GFP_NOWAIT, &comp_temp->fd_bus_addr);
++		if (!comp_temp->fd_virt_addr)
++			goto err;
++
++		comp_temp->fl_virt_addr =
++			(void *)((struct dpaa2_fd *)
++				comp_temp->fd_virt_addr + 1);
++		comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
++				sizeof(struct dpaa2_fd);
++		comp_temp->desc_virt_addr =
++			(void *)((struct dpaa2_frame_list *)
++				comp_temp->fl_virt_addr + 3);
++		comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
++				sizeof(struct dpaa2_frame_list) * 3;
++
++		comp_temp->qchan = dpaa2_chan;
++		comp_temp->sg_blk_num = 0;
++		INIT_LIST_HEAD(&comp_temp->sg_src_head);
++		INIT_LIST_HEAD(&comp_temp->sg_dst_head);
++		return comp_temp;
++	}
++	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
++			struct dpaa2_qdma_comp, list);
++	list_del(&comp_temp->list);
++	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++
++	comp_temp->qchan = dpaa2_chan;
++err:
++	return comp_temp;
++}
++
++static void dpaa2_qdma_populate_fd(uint32_t format,
++		struct dpaa2_qdma_comp *dpaa2_comp)
++{
++	struct dpaa2_fd *fd;
++
++	fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
++	memset(fd, 0, sizeof(struct dpaa2_fd));
++
++	/* fd populated */
++	fd->simple.addr = dpaa2_comp->fl_bus_addr;
++	/* Bypass memory translation, Frame list format, short length disable */
++	/* we need to disable BMT if fsl-mc use iova addr */
++	if (smmu_disable)
++		fd->simple.bpid = QMAN_FD_BMT_ENABLE;
++	fd->simple.format_offset = QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE;
++
++	fd->simple.frc = format | QDMA_SER_CTX;
++}
++
++/* first frame list for descriptor buffer */
++static void dpaa2_qdma_populate_first_framel(
++		struct dpaa2_frame_list *f_list,
++		struct dpaa2_qdma_comp *dpaa2_comp)
++{
++	struct dpaa2_qdma_sd_d *sdd;
++
++	sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
++	memset(sdd, 0, 2 * (sizeof(*sdd)));
++	/* source and destination descriptor */
++	sdd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT; /* source descriptor CMD */
++	sdd++;
++	sdd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT; /* dest descriptor CMD */
++
++	memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++	/* first frame list to source descriptor */
++	f_list->addr_lo = dpaa2_comp->desc_bus_addr;
++	f_list->addr_hi = (dpaa2_comp->desc_bus_addr >> 32);
++	f_list->data_len.data_len_sl0 = 0x20; /* source/destination desc len */
++	f_list->fmt = QDMA_FL_FMT_SBF; /* single buffer frame */
++	if (smmu_disable)
++		f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
++	f_list->sl = QDMA_FL_SL_LONG; /* long length */
++	f_list->f = 0; /* not the last frame list */
++}
++
++/* source and destination frame list */
++static void dpaa2_qdma_populate_frames(struct dpaa2_frame_list *f_list,
++		dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
++{
++	/* source frame list to source buffer */
++	memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++	f_list->addr_lo = src;
++	f_list->addr_hi = (src >> 32);
++	f_list->data_len.data_len_sl0 = len;
++	f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
++	if (smmu_disable)
++		f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
++	f_list->sl = QDMA_FL_SL_LONG; /* long length */
++	f_list->f = 0; /* not the last frame list */
++
++	f_list++;
++	/* destination frame list to destination buffer */
++	memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++	f_list->addr_lo = dst;
++	f_list->addr_hi = (dst >> 32);
++	f_list->data_len.data_len_sl0 = len;
++	f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
++	if (smmu_disable)
++		f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
++	f_list->sl = QDMA_FL_SL_LONG; /* long length */
++	f_list->f = QDMA_FL_F; /* Final bit: 1, for last frame list */
++}
++
++static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
++		struct dma_chan *chan, dma_addr_t dst,
++		dma_addr_t src, size_t len, unsigned long flags)
++{
++	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++	struct dpaa2_qdma_comp *dpaa2_comp;
++	struct dpaa2_frame_list *f_list;
++	uint32_t format;
++
++	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
++
++#ifdef LONG_FORMAT
++	format = QDMA_FD_LONG_FORMAT;
++#else
++	format = QDMA_FD_SHORT_FORMAT;
++#endif
++	/* populate Frame descriptor */
++	dpaa2_qdma_populate_fd(format, dpaa2_comp);
++
++	f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
++
++#ifdef LONG_FORMAT
++	/* first frame list for descriptor buffer (long format) */
++	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
++
++	f_list++;
++#endif
++
++	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
++
++	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
++}
++
++static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
++		struct dpaa2_qdma_comp *dpaa2_comp,
++		struct dpaa2_qdma_chan *dpaa2_chan)
++{
++	struct qdma_sg_blk *sg_blk = NULL;
++	dma_addr_t phy_sgb;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
++	if (list_empty(&dpaa2_chan->sgb_free)) {
++		sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
++				dpaa2_chan->sg_blk_pool,
++				GFP_NOWAIT, &phy_sgb);
++		if (!sg_blk) {
++			spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++			return sg_blk;
++		}
++		sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
++		sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
++	} else {
++		sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
++				struct qdma_sg_blk, list);
++		list_del(&sg_blk->list);
++	}
++	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++
++	return sg_blk;
++}
++
++static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
++		struct dpaa2_qdma_chan *dpaa2_chan,
++		struct dpaa2_qdma_comp *dpaa2_comp,
++		struct scatterlist *dst_sg, u32 dst_nents,
++		struct scatterlist *src_sg, u32 src_nents)
++{
++	struct dpaa2_qdma_sg *src_sge;
++	struct dpaa2_qdma_sg *dst_sge;
++	struct qdma_sg_blk *sg_blk;
++	struct qdma_sg_blk *sg_blk_dst;
++	dma_addr_t src;
++	dma_addr_t dst;
++	uint32_t num;
++	uint32_t blocks;
++	uint32_t len = 0;
++	uint32_t total_len = 0;
++	int i, j = 0;
++
++	num = min(dst_nents, src_nents);
++	blocks = num / (NUM_SG_PER_BLK - 1);
++	if (num % (NUM_SG_PER_BLK - 1))
++		blocks += 1;
++	if (dpaa2_comp->sg_blk_num < blocks) {
++		len = blocks - dpaa2_comp->sg_blk_num;
++		for (i = 0; i < len; i++) {
++			/* source sg blocks */
++			sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
++			if (!sg_blk)
++				return 0;
++			list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
++			/* destination sg blocks */
++			sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
++			if (!sg_blk)
++				return 0;
++			list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
++		}
++	} else {
++		len = dpaa2_comp->sg_blk_num - blocks;
++		for (i = 0; i < len; i++) {
++			spin_lock(&dpaa2_chan->queue_lock);
++			/* handle source sg blocks */
++			sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
++					struct qdma_sg_blk, list);
++			list_del(&sg_blk->list);
++			list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
++			/* handle destination sg blocks */
++			sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
++					struct qdma_sg_blk, list);
++			list_del(&sg_blk->list);
++			list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
++			spin_unlock(&dpaa2_chan->queue_lock);
++		}
++	}
++	dpaa2_comp->sg_blk_num = blocks;
++
++	/* get the first source sg phy address */
++	sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
++			struct qdma_sg_blk, list);
++	dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
++	/* get the first destination sg phy address */
++	sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
++			struct qdma_sg_blk, list);
++	dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
++
++	for (i = 0; i < blocks; i++) {
++		src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
++		dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
++
++		for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
++			len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
++			if (0 == len)
++				goto fetch;
++			total_len += len;
++			src = sg_dma_address(src_sg);
++			dst = sg_dma_address(dst_sg);
++
++			/* source SG */
++			src_sge->addr_lo = src;
++			src_sge->addr_hi = (src >> 32);
++			src_sge->data_len.data_len_sl0 = len;
++			src_sge->ctrl.sl = QDMA_SG_SL_LONG;
++			src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
++			/* destination SG */
++			dst_sge->addr_lo = dst;
++			dst_sge->addr_hi = (dst >> 32);
++			dst_sge->data_len.data_len_sl0 = len;
++			dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
++			dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
++fetch:
++			num--;
++			if (0 == num) {
++				src_sge->ctrl.f = QDMA_SG_F;
++				dst_sge->ctrl.f = QDMA_SG_F;
++				goto end;
++			}
++			dst_sg = sg_next(dst_sg);
++			src_sg = sg_next(src_sg);
++			src_sge++;
++			dst_sge++;
++			if (j == (NUM_SG_PER_BLK - 2)) {
++				/* for next blocks, extension */
++				sg_blk = list_next_entry(sg_blk, list);
++				sg_blk_dst = list_next_entry(sg_blk_dst, list);
++				src_sge->addr_lo = sg_blk->blk_bus_addr;
++				src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
++				src_sge->ctrl.sl = QDMA_SG_SL_LONG;
++				src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
++				dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
++				dst_sge->addr_hi =
++					sg_blk_dst->blk_bus_addr >> 32;
++				dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
++				dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
++			}
++		}
++	}
++
++end:
++	return total_len;
++}
++
++static struct dma_async_tx_descriptor *dpaa2_qdma_prep_sg(
++		struct dma_chan *chan,
++		struct scatterlist *dst_sg, u32 dst_nents,
++		struct scatterlist *src_sg, u32 src_nents,
++		unsigned long flags)
++{
++	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++	struct dpaa2_qdma_comp *dpaa2_comp;
++	struct dpaa2_frame_list *f_list;
++	struct device *dev = dpaa2_chan->qdma->priv->dev;
++	uint32_t total_len = 0;
++
++	/* basic sanity checks */
++	if (dst_nents == 0 || src_nents == 0)
++		return NULL;
++
++	if (dst_sg == NULL || src_sg == NULL)
++		return NULL;
++
++	/* get the descriptors required */
++	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
++
++	/* populate Frame descriptor */
++	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
++
++	/* prepare Scatter gather entry for source and destination */
++	total_len = dpaa2_qdma_populate_sg(dev, dpaa2_chan,
++			dpaa2_comp, dst_sg, dst_nents, src_sg, src_nents);
++
++	f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
++	/* first frame list for descriptor buffer */
++	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
++	f_list++;
++	/* prepare Scatter gather entry for source and destination */
++	/* populate source and destination frame list table */
++	dpaa2_qdma_populate_frames(f_list, dpaa2_comp->sge_dst_bus_addr,
++			dpaa2_comp->sge_src_bus_addr,
++			total_len, QDMA_FL_FMT_SGE);
++
++	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
++}
++
++static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
++		dma_cookie_t cookie, struct dma_tx_state *txstate)
++{
++	return dma_cookie_status(chan, cookie, txstate);
++}
++
++static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
++{
++}
++
++static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
++{
++	struct dpaa2_qdma_comp *dpaa2_comp;
++	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
++	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
++	struct virt_dma_desc *vdesc;
++	struct dpaa2_fd *fd;
++	int err;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
++	spin_lock(&dpaa2_chan->vchan.lock);
++	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
++		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
++		if (!vdesc)
++			goto err_enqueue;
++		dpaa2_comp = to_fsl_qdma_comp(vdesc);
++
++		fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
++
++		list_del(&vdesc->node);
++		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
++
++		/* TODO: priority hard-coded to zero */
++		err = dpaa2_io_service_enqueue_fq(NULL,
++				priv->tx_queue_attr[0].fqid, fd);
++		if (err) {
++			list_del(&dpaa2_comp->list);
++			list_add_tail(&dpaa2_comp->list,
++					&dpaa2_chan->comp_free);
++		}
++
++	}
++err_enqueue:
++	spin_unlock(&dpaa2_chan->vchan.lock);
++	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
++}
++
++static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
++{
++	struct device *dev = &ls_dev->dev;
++	struct dpaa2_qdma_priv *priv;
++	struct dpaa2_qdma_priv_per_prio *ppriv;
++	uint8_t prio_def = DPDMAI_PRIO_NUM;
++	int err;
++	int i;
++
++	priv = dev_get_drvdata(dev);
++
++	priv->dev = dev;
++	priv->dpqdma_id = ls_dev->obj_desc.id;
++
++	/* Get the handle for the DPDMAI this interface is associated with */
++	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
++	if (err) {
++		dev_err(dev, "dpdmai_open() failed\n");
++		return err;
++	}
++	dev_info(dev, "Opened dpdmai object successfully\n");
++
++	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
++			&priv->dpdmai_attr);
++	if (err) {
++		dev_err(dev, "dpdmai_get_attributes() failed\n");
++		return err;
++	}
++
++	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
++		dev_err(dev, "DPDMAI major version mismatch\n"
++			"Found %u.%u, supported version is %u.%u\n",
++			priv->dpdmai_attr.version.major,
++			priv->dpdmai_attr.version.minor,
++			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
++	}
++
++	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
++		dev_err(dev, "DPDMAI minor version mismatch\n"
++			"Found %u.%u, supported version is %u.%u\n",
++			priv->dpdmai_attr.version.major,
++			priv->dpdmai_attr.version.minor,
++			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
++	}
++
++	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
++	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
++	if (!ppriv) {
++		dev_err(dev, "kzalloc for ppriv failed\n");
++		return -1;
++	}
++	priv->ppriv = ppriv;
++
++	for (i = 0; i < priv->num_pairs; i++) {
++		err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
++				i, &priv->rx_queue_attr[i]);
++		if (err) {
++			dev_err(dev, "dpdmai_get_rx_queue() failed\n");
++			return err;
++		}
++		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
++
++		err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
++				i, &priv->tx_queue_attr[i]);
++		if (err) {
++			dev_err(dev, "dpdmai_get_tx_queue() failed\n");
++			return err;
++		}
++		ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
++		ppriv->prio = i;
++		ppriv->priv = priv;
++		ppriv++;
++	}
++
++	return 0;
++}
++
++static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
++{
++	struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
++			struct dpaa2_qdma_priv_per_prio, nctx);
++	struct dpaa2_qdma_priv *priv = ppriv->priv;
++	struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
++	struct dpaa2_qdma_chan *qchan;
++	const struct dpaa2_fd *fd;
++	const struct dpaa2_fd *fd_eq;
++	struct dpaa2_dq *dq;
++	int err;
++	int is_last = 0;
++	uint8_t status;
++	int i;
++	int found;
++	uint32_t n_chans = priv->dpaa2_qdma->n_chans;
++
++	do {
++		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
++				ppriv->store);
++	} while (err);
++
++	while (!is_last) {
++		do {
++			dq = dpaa2_io_store_next(ppriv->store, &is_last);
++		} while (!is_last && !dq);
++		if (!dq) {
++			dev_err(priv->dev, "FQID returned no valid frames!\n");
++			continue;
++		}
++
++		/* obtain FD and process the error */
++		fd = dpaa2_dq_fd(dq);
++		status = fd->simple.ctrl & 0xff;
++		if (status)
++			dev_err(priv->dev, "FD error occurred\n");
++		found = 0;
++		for (i = 0; i < n_chans; i++) {
++			qchan = &priv->dpaa2_qdma->chans[i];
++			spin_lock(&qchan->queue_lock);
++			if (list_empty(&qchan->comp_used)) {
++				spin_unlock(&qchan->queue_lock);
++				continue;
++			}
++			list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
++					&qchan->comp_used, list) {
++				fd_eq = (struct dpaa2_fd *)
++					dpaa2_comp->fd_virt_addr;
++
++				if (fd_eq->simple.addr ==
++						fd->simple.addr) {
++
++					list_del(&dpaa2_comp->list);
++					list_add_tail(&dpaa2_comp->list,
++							&qchan->comp_free);
++
++					spin_lock(&qchan->vchan.lock);
++					vchan_cookie_complete(
++							&dpaa2_comp->vdesc);
++					spin_unlock(&qchan->vchan.lock);
++					found = 1;
++					break;
++				}
++			}
++			spin_unlock(&qchan->queue_lock);
++			if (found)
++				break;
++		}
++	}
++
++	dpaa2_io_service_rearm(NULL, ctx);
++}
++
++static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
++{
++	int err, i, num;
++	struct device *dev = priv->dev;
++	struct dpaa2_qdma_priv_per_prio *ppriv;
++
++	num = priv->num_pairs;
++	ppriv = priv->ppriv;
++	for (i = 0; i < num; i++) {
++		ppriv->nctx.is_cdan = 0;
++		ppriv->nctx.desired_cpu = 1;
++		ppriv->nctx.id = ppriv->rsp_fqid;
++		ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
++		err = dpaa2_io_service_register(NULL, &ppriv->nctx);
++		if (err) {
++			dev_err(dev, "Notification register failed\n");
++			goto err_service;
++		}
++
++		ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
++				dev);
++		if (!ppriv->store) {
++			dev_err(dev, "dpaa2_io_store_create() failed\n");
++			goto err_store;
++		}
++
++		ppriv++;
++	}
++	return 0;
++
++err_store:
++	dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++err_service:
++	ppriv--;
++	while (ppriv >= priv->ppriv) {
++		dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++		dpaa2_io_store_destroy(ppriv->store);
++		ppriv--;
++	}
++	return -1;
++}
++
++static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
++{
++	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
++	int i;
++
++	for (i = 0; i < priv->num_pairs; i++) {
++		dpaa2_io_store_destroy(ppriv->store);
++		ppriv++;
++	}
++}
++
++static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
++{
++	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
++	int i;
++
++	for (i = 0; i < priv->num_pairs; i++) {
++		dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++		ppriv++;
++	}
++}
++
++static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
++{
++	int err;
++	struct dpdmai_rx_queue_cfg rx_queue_cfg;
++	struct device *dev = priv->dev;
++	struct dpaa2_qdma_priv_per_prio *ppriv;
++	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
++	int i, num;
++
++	num = priv->num_pairs;
++	ppriv = priv->ppriv;
++	for (i = 0; i < num; i++) {
++		rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
++					DPDMAI_QUEUE_OPT_DEST;
++		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
++		rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
++		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
++		rx_queue_cfg.dest_cfg.priority = ppriv->prio;
++		err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
++				rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
++		if (err) {
++			dev_err(dev, "dpdmai_set_rx_queue() failed\n");
++			return err;
++		}
++
++		ppriv++;
++	}
++
++	return 0;
++}
++
++static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
++{
++	int err = 0;
++	struct device *dev = priv->dev;
++	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
++	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
++	int i;
++
++	for (i = 0; i < priv->num_pairs; i++) {
++		ppriv->nctx.qman64 = 0;
++		ppriv->nctx.dpio_id = 0;
++		ppriv++;
++	}
++
++	err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
++	if (err)
++		dev_err(dev, "dpdmai_reset() failed\n");
++
++	return err;
++}
++
++static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
++		struct list_head *head)
++{
++	struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
++	/* free the QDMA SG pool block */
++	list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
++		sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
++				sgb_tmp->blk_virt_addr - 1);
++		sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
++				- sizeof(*sgb_tmp);
++		dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
++				sgb_tmp->blk_bus_addr);
++	}
++
++}
++
++static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
++		struct list_head *head)
++{
++	struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
++	/* free the QDMA comp resource */
++	list_for_each_entry_safe(comp_tmp, _comp_tmp,
++			head, list) {
++		dma_pool_free(qchan->fd_pool,
++				comp_tmp->fd_virt_addr,
++				comp_tmp->fd_bus_addr);
++		/* free the SG source block on comp */
++		dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
++		/* free the SG destination block on comp */
++		dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
++		list_del(&comp_tmp->list);
++		kfree(comp_tmp);
++	}
++
++}
++
++static void __cold dpaa2_dpdmai_free_channels(
++		struct dpaa2_qdma_engine *dpaa2_qdma)
++{
++	struct dpaa2_qdma_chan *qchan;
++	int num, i;
++
++	num = dpaa2_qdma->n_chans;
++	for (i = 0; i < num; i++) {
++		qchan = &dpaa2_qdma->chans[i];
++		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
++		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
++		dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
++		dma_pool_destroy(qchan->fd_pool);
++		dma_pool_destroy(qchan->sg_blk_pool);
++	}
++}
++
++static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
++{
++	struct dpaa2_qdma_chan *dpaa2_chan;
++	struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
++	int i;
++
++	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
++	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
++		dpaa2_chan = &dpaa2_qdma->chans[i];
++		dpaa2_chan->qdma = dpaa2_qdma;
++		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
++		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
++
++		dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
++				dev, FD_POOL_SIZE, 32, 0);
++		if (!dpaa2_chan->fd_pool)
++			return -1;
++		dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
++				dev, SG_POOL_SIZE, 32, 0);
++		if (!dpaa2_chan->sg_blk_pool)
++			return -1;
++
++		spin_lock_init(&dpaa2_chan->queue_lock);
++		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
++		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
++		INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
++	}
++	return 0;
++}
++
++static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
++{
++	struct dpaa2_qdma_priv *priv;
++	struct device *dev = &dpdmai_dev->dev;
++	struct dpaa2_qdma_engine *dpaa2_qdma;
++	int err;
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++	dev_set_drvdata(dev, priv);
++	priv->dpdmai_dev = dpdmai_dev;
++
++	priv->iommu_domain = iommu_get_domain_for_dev(dev);
++	if (priv->iommu_domain)
++		smmu_disable = false;
++
++	/* obtain a MC portal */
++	err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
++	if (err) {
++		dev_err(dev, "MC portal allocation failed\n");
++		goto err_mcportal;
++	}
++
++	/* DPDMAI initialization */
++	err = dpaa2_qdma_setup(dpdmai_dev);
++	if (err) {
++		dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
++		goto err_dpdmai_setup;
++	}
++
++	/* DPIO */
++	err = dpaa2_qdma_dpio_setup(priv);
++	if (err) {
++		dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
++		goto err_dpio_setup;
++	}
++
++	/* DPDMAI binding to DPIO */
++	err = dpaa2_dpdmai_bind(priv);
++	if (err) {
++		dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
++		goto err_bind;
++	}
++
++	/* DPDMAI enable */
++	err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
++	if (err) {
++		dev_err(dev, "dpdmai_enable() failed\n");
++		goto err_enable;
++	}
++
++	dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
++	if (!dpaa2_qdma) {
++		err = -ENOMEM;
++		goto err_eng;
++	}
++
++	priv->dpaa2_qdma = dpaa2_qdma;
++	dpaa2_qdma->priv = priv;
++
++	dpaa2_qdma->n_chans = NUM_CH;
++
++	err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
++	if (err) {
++		dev_err(dev, "QDMA alloc channels failed\n");
++		goto err_reg;
++	}
++
++	dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
++	dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
++	dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
++	dma_cap_set(DMA_SG, dpaa2_qdma->dma_dev.cap_mask);
++
++	dpaa2_qdma->dma_dev.dev = dev;
++	dpaa2_qdma->dma_dev.device_alloc_chan_resources
++		= dpaa2_qdma_alloc_chan_resources;
++	dpaa2_qdma->dma_dev.device_free_chan_resources
++		= dpaa2_qdma_free_chan_resources;
++	dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
++	dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
++	dpaa2_qdma->dma_dev.device_prep_dma_sg = dpaa2_qdma_prep_sg;
++	dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
++
++	err = dma_async_device_register(&dpaa2_qdma->dma_dev);
++	if (err) {
++		dev_err(dev, "Can't register NXP QDMA engine.\n");
++		goto err_reg;
++	}
++
++	return 0;
++
++err_reg:
++	dpaa2_dpdmai_free_channels(dpaa2_qdma);
++	kfree(dpaa2_qdma);
++err_eng:
++	dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
++err_enable:
++	dpaa2_dpdmai_dpio_unbind(priv);
++err_bind:
++	dpaa2_dpmai_store_free(priv);
++	dpaa2_dpdmai_dpio_free(priv);
++err_dpio_setup:
++	dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
++err_dpdmai_setup:
++	fsl_mc_portal_free(priv->mc_io);
++err_mcportal:
++	kfree(priv->ppriv);
++	kfree(priv);
++	dev_set_drvdata(dev, NULL);
++	return err;
++}
++
++static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
++{
++	struct device *dev;
++	struct dpaa2_qdma_priv *priv;
++	struct dpaa2_qdma_engine *dpaa2_qdma;
++
++	dev = &ls_dev->dev;
++	priv = dev_get_drvdata(dev);
++	dpaa2_qdma = priv->dpaa2_qdma;
++
++	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
++	dpaa2_dpdmai_dpio_unbind(priv);
++	dpaa2_dpmai_store_free(priv);
++	dpaa2_dpdmai_dpio_free(priv);
++	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
++	fsl_mc_portal_free(priv->mc_io);
++	dev_set_drvdata(dev, NULL);
++	dpaa2_dpdmai_free_channels(dpaa2_qdma);
++
++	dma_async_device_unregister(&dpaa2_qdma->dma_dev);
++	kfree(priv);
++	kfree(dpaa2_qdma);
++
++	return 0;
++}
++
++static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
++	{
++		.vendor = FSL_MC_VENDOR_FREESCALE,
++		.obj_type = "dpdmai",
++	},
++	{ .vendor = 0x0 }
++};
++
++static struct fsl_mc_driver dpaa2_qdma_driver = {
++	.driver = {
++		.name = "dpaa2-qdma",
++		.owner = THIS_MODULE,
++	},
++	.probe = dpaa2_qdma_probe,
++	.remove = dpaa2_qdma_remove,
++	.match_id_table = dpaa2_qdma_id_table
++};
++
++static int __init dpaa2_qdma_driver_init(void)
++{
++	return fsl_mc_driver_register(&(dpaa2_qdma_driver));
++}
++late_initcall(dpaa2_qdma_driver_init);
++
++static void __exit fsl_qdma_exit(void)
++{
++	fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
++}
++module_exit(fsl_qdma_exit);
++
++MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
++MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
+new file mode 100644
+index 00000000..71a00db8
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
+@@ -0,0 +1,262 @@
++/* Copyright 2015 NXP Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ *   notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ *   notice, this list of conditions and the following disclaimer in the
++ *   documentation and/or other materials provided with the distribution.
++ * * Neither the name of NXP Semiconductor nor the
++ *   names of its contributors may be used to endorse or promote products
++ *   derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __DPAA2_QDMA_H
++#define __DPAA2_QDMA_H
++
++#define LONG_FORMAT 1
++
++#define DPAA2_QDMA_STORE_SIZE 16
++#define NUM_CH 8
++#define NUM_SG_PER_BLK 16
++
++#define QDMA_DMR_OFFSET	0x0
++#define QDMA_DQ_EN (0 << 30)
++#define QDMA_DQ_DIS (1 << 30)
++
++#define QDMA_DSR_M_OFFSET 0x10004
++
++struct dpaa2_qdma_sd_d {
++	uint32_t rsv:32;
++	union {
++		struct {
++			uint32_t ssd:12; /* source stride distance */
++			uint32_t sss:12; /* source stride size */
++			uint32_t rsv1:8;
++		} sdf;
++		struct {
++			uint32_t dsd:12; /* Destination stride distance */
++			uint32_t dss:12; /* Destination stride size */
++			uint32_t rsv2:8;
++		} ddf;
++	} df;
++	uint32_t rbpcmd; /* Route-by-port command */
++	uint32_t cmd;
++} __attribute__((__packed__));
++/* Source descriptor command read transaction type for RBP=0:
++   coherent copy of cacheable memory */
++#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
++/* Destination descriptor command write transaction type for RBP=0:
++   coherent copy of cacheable memory */
++#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
++
++#define QDMA_SG_FMT_SDB	0x0 /* single data buffer */
++#define QDMA_SG_FMT_FDS	0x1 /* frame data section */
++#define QDMA_SG_FMT_SGTE	0x2 /* SGT extension */
++#define QDMA_SG_SL_SHORT	0x1 /* short length */
++#define QDMA_SG_SL_LONG	0x0 /* long length */
++#define QDMA_SG_F	0x1 /* last sg entry */
++struct dpaa2_qdma_sg {
++	uint32_t addr_lo; /* address 0:31 */
++	uint32_t addr_hi:17; /* address 32:48 */
++	uint32_t rsv:15;
++	union {
++		uint32_t data_len_sl0; /* SL=0, the long format */
++		struct {
++			uint32_t len:17; /* SL=1, the short format */
++			uint32_t reserve:3;
++			uint32_t sf:1;
++			uint32_t sr:1;
++			uint32_t size:10; /* buff size */
++		} data_len_sl1;
++	} data_len; /* AVAIL_LENGTH */
++	struct {
++		uint32_t bpid:14;
++		uint32_t ivp:1;
++		uint32_t mbt:1;
++		uint32_t offset:12;
++		uint32_t fmt:2;
++		uint32_t sl:1;
++		uint32_t f:1;
++	} ctrl;
++} __attribute__((__packed__));
++
++#define QMAN_FD_FMT_ENABLE (1 << 12) /* frame list table enable */
++#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
++#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
++#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
++#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
++
++#define QDMA_SB_FRAME (0 << 28) /* single frame */
++#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
++#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
++#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
++
++#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
++#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
++#define QDMA_SER_DISABLE (0 << 8) /* no notification */
++#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
++#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
++#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
++#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
++
++#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
++#define QMAN_FD_VA_DISABLE (0 << 14) /* Address used is a real address */
++#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
++#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
++#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
++
++#define QDMA_FL_FMT_SBF 0x0 /* Single buffer frame */
++#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
++#define QDMA_FL_BMT_ENABLE 0x1 /* enable bypass memory translation */
++#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
++#define QDMA_FL_SL_LONG 0x0 /* long length */
++#define QDMA_FL_SL_SHORT 0x1 /* short length */
++#define QDMA_FL_F 0x1 /* last frame list bit */
++/* Description of Frame list table structure */
++struct dpaa2_frame_list {
++	uint32_t addr_lo; /* lower 32 bits of address */
++	uint32_t addr_hi:17; /* upper 17 bits of address */
++	uint32_t resrvd:15;
++	union {
++		uint32_t data_len_sl0; /* If SL=0, then data length is 32 */
++		struct {
++			uint32_t data_len:18; /* IF SL=1; length is 18bit */
++			uint32_t resrvd:2;
++			uint32_t mem:12; /* Valid only when SL=1 */
++		} data_len_sl1;
++	} data_len;
++	/* word 4 */
++	uint32_t bpid:14; /* Frame buffer pool ID */
++	uint32_t ivp:1; /* Invalid Pool ID. */
++	uint32_t bmt:1; /* Bypass Memory Translation */
++	uint32_t offset:12; /* Frame offset */
++	uint32_t fmt:2; /* Frame Format */
++	uint32_t sl:1; /* Short Length */
++	uint32_t f:1; /* Final bit */
++
++	uint32_t frc; /* Frame Context */
++	/* word 6 */
++	uint32_t err:8; /* Frame errors */
++	uint32_t resrvd0:8;
++	uint32_t asal:4; /* accelerator-specific annotation length */
++	uint32_t resrvd1:1;
++	uint32_t ptv2:1;
++	uint32_t ptv1:1;
++	uint32_t pta:1; /* pass-through annotation */
++	uint32_t resrvd2:8;
++
++	uint32_t flc_lo; /* lower 32 bits of flow context */
++	uint32_t flc_hi; /* higher 32 bits of flow context */
++} __attribute__((__packed__));
++
++struct dpaa2_qdma_chan {
++	struct virt_dma_chan vchan;
++	struct virt_dma_desc vdesc;
++	enum dma_status status;
++	struct dpaa2_qdma_engine *qdma;
++
++	struct mutex dpaa2_queue_mutex;
++	spinlock_t queue_lock;
++	struct dma_pool *fd_pool;
++	struct dma_pool *sg_blk_pool;
++
++	struct list_head comp_used;
++	struct list_head comp_free;
++
++	struct list_head sgb_free;
++};
++
++struct qdma_sg_blk {
++	dma_addr_t blk_bus_addr;
++	void *blk_virt_addr;
++	struct list_head list;
++};
++
++struct dpaa2_qdma_comp {
++	dma_addr_t fd_bus_addr;
++	dma_addr_t fl_bus_addr;
++	dma_addr_t desc_bus_addr;
++	dma_addr_t sge_src_bus_addr;
++	dma_addr_t sge_dst_bus_addr;
++	void *fd_virt_addr;
++	void *fl_virt_addr;
++	void *desc_virt_addr;
++	void *sg_src_virt_addr;
++	void *sg_dst_virt_addr;
++	struct qdma_sg_blk *sg_blk;
++	uint32_t sg_blk_num;
++	struct list_head sg_src_head;
++	struct list_head sg_dst_head;
++	struct dpaa2_qdma_chan *qchan;
++	struct virt_dma_desc vdesc;
++	struct list_head list;
++};
++
++struct dpaa2_qdma_engine {
++	struct dma_device dma_dev;
++	u32 n_chans;
++	struct dpaa2_qdma_chan chans[NUM_CH];
++
++	struct dpaa2_qdma_priv *priv;
++};
++
++/*
++ * dpaa2_qdma_priv - driver private data
++ */
++struct dpaa2_qdma_priv {
++	int dpqdma_id;
++
++	struct iommu_domain *iommu_domain;
++	struct dpdmai_attr dpdmai_attr;
++	struct device *dev;
++	struct fsl_mc_io *mc_io;
++	struct fsl_mc_device *dpdmai_dev;
++
++	struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
++	struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
++
++	uint8_t num_pairs;
++
++	struct dpaa2_qdma_engine *dpaa2_qdma;
++	struct dpaa2_qdma_priv_per_prio *ppriv;
++};
++
++struct dpaa2_qdma_priv_per_prio {
++	int req_fqid;
++	int rsp_fqid;
++	int prio;
++
++	struct dpaa2_io_store *store;
++	struct dpaa2_io_notification_ctx nctx;
++
++	struct dpaa2_qdma_priv *priv;
++};
++
++/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
++#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
++		sizeof(struct dpaa2_frame_list) * 3 + \
++		sizeof(struct dpaa2_qdma_sd_d) * 2)
++
++/* qdma_sg_blk + 16 SGs */
++#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
++		sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
++#endif /* __DPAA2_QDMA_H */
+diff --git a/drivers/dma/dpaa2-qdma/dpdmai.c b/drivers/dma/dpaa2-qdma/dpdmai.c
+new file mode 100644
+index 00000000..ad13fc1e
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/dpdmai.c
+@@ -0,0 +1,454 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ *   notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ *   notice, this list of conditions and the following disclaimer in the
++ *   documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ *   names of any contributors may be used to endorse or promote products
++ *   derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/types.h>
++#include <linux/io.h>
++#include "fsl_dpdmai.h"
++#include "fsl_dpdmai_cmd.h"
++#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
++#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
++
++int dpdmai_open(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		int dpdmai_id,
++		uint16_t *token)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
++					  cmd_flags,
++					  0);
++	DPDMAI_CMD_OPEN(cmd, dpdmai_id);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	*token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++	return 0;
++}
++
++int dpdmai_close(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
++					  cmd_flags, token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_create(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		const struct dpdmai_cfg *cfg,
++		uint16_t *token)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
++					  cmd_flags,
++					  0);
++	DPDMAI_CMD_CREATE(cmd, cfg);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	*token = MC_CMD_HDR_READ_TOKEN(cmd.header);
++
++	return 0;
++}
++
++int dpdmai_destroy(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_enable(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_disable(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		int *en)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	DPDMAI_RSP_IS_ENABLED(cmd, *en);
++
++	return 0;
++}
++
++int dpdmai_reset(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t irq_index,
++		int *type,
++		struct dpdmai_irq_cfg *irq_cfg)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_GET_IRQ(cmd, irq_index);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
++
++	return 0;
++}
++
++int dpdmai_set_irq(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t irq_index,
++		struct dpdmai_irq_cfg *irq_cfg)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t irq_index,
++		uint8_t *en)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
++
++	return 0;
++}
++
++int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t irq_index,
++		uint8_t en)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t irq_index,
++		uint32_t *mask)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
++
++	return 0;
++}
++
++int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t irq_index,
++		uint32_t mask)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t irq_index,
++		uint32_t *status)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
++
++	return 0;
++}
++
++int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t irq_index,
++		uint32_t status)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		struct dpdmai_attr *attr)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
++					  cmd_flags,
++					  token);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	DPDMAI_RSP_GET_ATTR(cmd, attr);
++
++	return 0;
++}
++
++int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t priority,
++		const struct dpdmai_rx_queue_cfg *cfg)
++{
++	struct mc_command cmd = { 0 };
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
++
++	/* send command to mc*/
++	return mc_send_command(mc_io, &cmd);
++}
++
++int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t priority, struct dpdmai_rx_queue_attr *attr)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
++
++	return 0;
++}
++
++int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token,
++		uint8_t priority,
++		struct dpdmai_tx_queue_attr *attr)
++{
++	struct mc_command cmd = { 0 };
++	int err;
++
++	/* prepare command */
++	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
++					  cmd_flags,
++					  token);
++	DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
++
++	/* send command to mc*/
++	err = mc_send_command(mc_io, &cmd);
++	if (err)
++		return err;
++
++	/* retrieve response parameters */
++	DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
++
++	return 0;
++}
+diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
+new file mode 100644
+index 00000000..e931ce16
+--- /dev/null
++++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
+@@ -0,0 +1,521 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ *   notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ *   notice, this list of conditions and the following disclaimer in the
++ *   documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ *   names of any contributors may be used to endorse or promote products
++ *   derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef __FSL_DPDMAI_H
++#define __FSL_DPDMAI_H
++
++struct fsl_mc_io;
++
++/* Data Path DMA Interface API
++ * Contains initialization APIs and runtime control APIs for DPDMAI
++ */
++
++/* General DPDMAI macros */
++
++/**
++ * Maximum number of Tx/Rx priorities per DPDMAI object
++ */
++#define DPDMAI_PRIO_NUM		2
++
++/**
++ * All queues considered; see dpdmai_set_rx_queue()
++ */
++#define DPDMAI_ALL_QUEUES	(uint8_t)(-1)
++
++/**
++ * dpdmai_open() - Open a control session for the specified object
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpdmai_id:	DPDMAI unique ID
++ * @token:	Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpdmai_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmai_open(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		int dpdmai_id,
++		uint16_t *token);
++
++/**
++ * dpdmai_close() - Close the control session of the object
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMAI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmai_close(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token);
++
++/**
++ * struct dpdmai_cfg - Structure representing DPDMAI configuration
++ * @priorities: Priorities for the DMA hardware processing; valid priorities are
++ *	configured with values 1-8; the entry following last valid entry
++ *	should be configured with 0
++ */
++struct dpdmai_cfg {
++	uint8_t priorities[DPDMAI_PRIO_NUM];
++};
++
++/**
++ * dpdmai_create() - Create the DPDMAI object
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg:	Configuration structure
++ * @token:	Returned token; use in subsequent API calls
++ *
++ * Create the DPDMAI object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ * ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpdmai_open() function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpdmai_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpdmai_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpdmai_is_enabled() - Check if the DPDMAI is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpdmai_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpdmai_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. 
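++ *
++ * A minimal usage sketch (the address and data values below are
++ * illustrative placeholders, not required settings):
++ *
++ *	struct dpdmai_irq_cfg irq_cfg = {
++ *		.addr = 0x20000000,	/- message write address (example) -/
++ *		.val = 0x1,		/- data written to 'addr' (example) -/
++ *		.irq_num = 0,
++ *	};
++ *	err = dpdmai_set_irq(mc_io, 0, token, 0, &irq_cfg);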
++ */
++int dpdmai_set_irq(struct fsl_mc_io *mc_io,
++		   uint32_t cmd_flags,
++		   uint16_t token,
++		   uint8_t irq_index,
++		   struct dpdmai_irq_cfg *irq_cfg);
++
++/**
++ * dpdmai_get_irq() - Get IRQ information from the DPDMAI
++ *
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMAI object
++ * @irq_index:	The interrupt index to configure
++ * @type:	Interrupt type: 0 represents message interrupt
++ *		type (both irq_addr and irq_val are valid)
++ * @irq_cfg:	IRQ attributes
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq(struct fsl_mc_io *mc_io,
++		   uint32_t cmd_flags,
++		   uint16_t token,
++		   uint8_t irq_index,
++		   int *type,
++		   struct dpdmai_irq_cfg *irq_cfg);
++
++/**
++ * dpdmai_set_irq_enable() - Set overall interrupt state.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMAI object
++ * @irq_index:	The interrupt index to configure
++ * @en:		Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable setting
++ * controls the overall interrupt state; if the interrupt is disabled,
++ * no cause will trigger an interrupt.
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
++			  uint32_t cmd_flags,
++			  uint16_t token,
++			  uint8_t irq_index,
++			  uint8_t en);
++
++/**
++ * dpdmai_get_irq_enable() - Get overall interrupt state
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMAI object
++ * @irq_index:	The interrupt index to configure
++ * @en:		Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
++			  uint32_t cmd_flags,
++			  uint16_t token,
++			  uint8_t irq_index,
++			  uint8_t *en);
++
++/**
++ * dpdmai_set_irq_mask() - Set interrupt mask.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMAI object
++ * @irq_index:	The interrupt index to configure
++ * @mask:	Event mask to trigger interrupt;
++ *		each bit:
++ *			0 = ignore event
++ *			1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently.
++ *
++ * Return:	'0' on Success; Error code otherwise.
++ */
++int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
++			uint32_t cmd_flags,
++			uint16_t token,
++			uint8_t irq_index,
++			uint32_t mask);
++
++/**
++ * dpdmai_get_irq_mask() - Get interrupt mask.
++ * @mc_io:	Pointer to MC portal's I/O object
++ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token:	Token of DPDMAI object
++ * @irq_index:	The interrupt index to configure
++ * @mask:	Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently.
++ *
++ * Return:	'0' on Success; Error code otherwise.
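++ *
++ * A read-modify-write sketch for unmasking one cause (bit 0 is an
++ * assumed example cause, not taken from hardware documentation):
++ *
++ *	uint32_t mask;
++ *
++ *	err = dpdmai_get_irq_mask(mc_io, 0, token, 0, &mask);
++ *	if (!err)
++ *		err = dpdmai_set_irq_mask(mc_io, 0, token, 0, mask | 0x1);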
++ */ ++int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpdmai_get_irq_status() - Get the current status of any pending interrupts ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpdmai_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpdmai_attr - Structure representing DPDMAI attributes ++ * @id: DPDMAI object ID ++ * @version: DPDMAI version ++ * @num_of_priorities: number of priorities ++ */ ++struct dpdmai_attr { ++ int id; ++ /** ++ * struct version - DPDMAI version ++ * @major: DPDMAI major version ++ * @minor: DPDMAI minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ uint8_t num_of_priorities; ++}; ++ ++/** ++ * dpdmai_get_attributes() - Retrieve DPDMAI attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
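++ *
++ * Typical call sequence after dpdmai_open() (sketch only):
++ *
++ *	struct dpdmai_attr attr;
++ *
++ *	err = dpdmai_get_attributes(mc_io, 0, token, &attr);
++ *	if (!err)
++ *		nr_queues = attr.num_of_priorities;
++ *
++ * where 'nr_queues' is a caller-defined variable; num_of_priorities
++ * bounds the 'priority' argument accepted by the queue APIs below.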
++ */ ++int dpdmai_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpdmai_attr *attr); ++ ++/** ++ * enum dpdmai_dest - DPDMAI destination types ++ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode ++ * and does not generate FQDAN notifications; user is expected to dequeue ++ * from the queue based on polling or other user-defined method ++ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN ++ * notifications to the specified DPIO; user is expected to dequeue ++ * from the queue only after notification is received ++ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate ++ * FQDAN notifications, but is connected to the specified DPCON object; ++ * user is expected to dequeue from the DPCON channel ++ */ ++enum dpdmai_dest { ++ DPDMAI_DEST_NONE = 0, ++ DPDMAI_DEST_DPIO = 1, ++ DPDMAI_DEST_DPCON = 2 ++}; ++ ++/** ++ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters ++ * @dest_type: Destination type ++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type ++ * @priority: Priority selection within the DPIO or DPCON channel; valid values ++ * are 0-1 or 0-7, depending on the number of priorities in that ++ * channel; not relevant for 'DPDMAI_DEST_NONE' option ++ */ ++struct dpdmai_dest_cfg { ++ enum dpdmai_dest dest_type; ++ int dest_id; ++ uint8_t priority; ++}; ++ ++/* DPDMAI queue modification options */ ++ ++/** ++ * Select to modify the user's context associated with the queue ++ */ ++#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001 ++ ++/** ++ * Select to modify the queue's destination ++ */ ++#define DPDMAI_QUEUE_OPT_DEST 0x00000002 ++ ++/** ++ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration ++ * @options: Flags representing the suggested modifications to the queue; ++ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags ++ * @user_ctx: User context value provided in the frame descriptor of each ++ * dequeued frame; ++ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options' ++ * @dest_cfg: Queue destination parameters; ++ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options' ++ */ ++struct dpdmai_rx_queue_cfg { ++ uint32_t options; ++ uint64_t user_ctx; ++ struct dpdmai_dest_cfg dest_cfg; ++ ++}; ++ ++/** ++ * dpdmai_set_rx_queue() - Set Rx queue configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @priority: Select the queue relative to number of ++ * priorities configured at DPDMAI creation; use ++ * DPDMAI_ALL_QUEUES to configure all Rx queues ++ * identically. ++ * @cfg: Rx queue configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t priority, ++ const struct dpdmai_rx_queue_cfg *cfg); ++ ++/** ++ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues ++ * @user_ctx: User context value provided in the frame descriptor of each ++ * dequeued frame ++ * @dest_cfg: Queue destination configuration ++ * @fqid: Virtual FQID value to be used for dequeue operations ++ */ ++struct dpdmai_rx_queue_attr { ++ uint64_t user_ctx; ++ struct dpdmai_dest_cfg dest_cfg; ++ uint32_t fqid; ++}; ++ ++/** ++ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @priority: Select the queue relative to number of ++ * priorities configured at DPDMAI creation ++ * @attr: Returned Rx queue attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t priority, ++ struct dpdmai_rx_queue_attr *attr); ++ ++/** ++ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues ++ * @fqid: Virtual FQID to be used for sending frames to DMA hardware ++ */ ++ ++struct dpdmai_tx_queue_attr { ++ uint32_t fqid; ++}; ++ ++/** ++ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPDMAI object ++ * @priority: Select the queue relative to number of ++ * priorities configured at DPDMAI creation ++ * @attr: Returned Tx queue attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t priority, ++ struct dpdmai_tx_queue_attr *attr); ++ ++#endif /* __FSL_DPDMAI_H */ +diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h +new file mode 100644 +index 00000000..7d403c01 +--- /dev/null ++++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h +@@ -0,0 +1,222 @@ ++/* Copyright 2013-2016 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef _FSL_DPDMAI_CMD_H ++#define _FSL_DPDMAI_CMD_H ++ ++/* DPDMAI Version */ ++#define DPDMAI_VER_MAJOR 2 ++#define DPDMAI_VER_MINOR 2 ++ ++#define DPDMAI_CMD_BASE_VERSION 0 ++#define DPDMAI_CMD_ID_OFFSET 4 ++ ++/* Command IDs */ ++#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++ ++#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++ ++#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++ ++#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) ++ ++ ++#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */ ++#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */ ++ ++ ++#define MAKE_UMASK64(_width) \ ++ ((uint64_t)((_width) < 64 ? 
((uint64_t)1 << (_width)) - 1 : \ ++ (uint64_t)-1)) ++ ++static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val) ++{ ++ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset); ++} ++ ++static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width) ++{ ++ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width)); ++} ++ ++#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ ++ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) ++ ++#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \ ++ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width))) ++ ++#define MC_CMD_HDR_READ_TOKEN(_hdr) \ ++ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S)) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_CREATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_IS_ENABLED(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* 
cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_ATTR(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \ ++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \ ++ MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ ++ MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid) ++ ++#endif /* _FSL_DPDMAI_CMD_H */ +diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c +new file mode 100644 +index 00000000..6c4c2813 +--- /dev/null ++++ b/drivers/dma/fsl-qdma.c +@@ -0,0 +1,1201 @@ ++/* ++ * drivers/dma/fsl-qdma.c ++ * ++ * Copyright 2014-2015 Freescale Semiconductor, Inc. ++ * ++ * Driver for the Freescale qDMA engine with software command queue mode. ++ * Channel virtualization is supported through enqueuing of DMA jobs to, ++ * or dequeuing DMA jobs from, different work queues. ++ * This module can be found on Freescale LS SoCs. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. 
++ */ ++ ++#include <asm/cacheflush.h> ++#include <linux/clk.h> ++#include <linux/delay.h> ++#include <linux/dma-mapping.h> ++#include <linux/dmapool.h> ++#include <linux/init.h> ++#include <linux/interrupt.h> ++#include <linux/module.h> ++#include <linux/of.h> ++#include <linux/of_address.h> ++#include <linux/of_device.h> ++#include <linux/of_dma.h> ++#include <linux/of_irq.h> ++#include <linux/slab.h> ++#include <linux/spinlock.h> ++ ++#include "virt-dma.h" ++ ++#define FSL_QDMA_DMR 0x0 ++#define FSL_QDMA_DSR 0x4 ++#define FSL_QDMA_DEIER 0xe00 ++#define FSL_QDMA_DEDR 0xe04 ++#define FSL_QDMA_DECFDW0R 0xe10 ++#define FSL_QDMA_DECFDW1R 0xe14 ++#define FSL_QDMA_DECFDW2R 0xe18 ++#define FSL_QDMA_DECFDW3R 0xe1c ++#define FSL_QDMA_DECFQIDR 0xe30 ++#define FSL_QDMA_DECBR 0xe34 ++ ++#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x)) ++#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x)) ++#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x)) ++#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x)) ++#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x)) ++#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x)) ++#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x)) ++#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x)) ++ ++#define FSL_QDMA_SQDPAR 0x80c ++#define FSL_QDMA_SQEPAR 0x814 ++#define FSL_QDMA_BSQMR 0x800 ++#define FSL_QDMA_BSQSR 0x804 ++#define FSL_QDMA_BSQICR 0x828 ++#define FSL_QDMA_CQMR 0xa00 ++#define FSL_QDMA_CQDSCR1 0xa08 ++#define FSL_QDMA_CQDSCR2 0xa0c ++#define FSL_QDMA_CQIER 0xa10 ++#define FSL_QDMA_CQEDR 0xa14 ++#define FSL_QDMA_SQCCMR 0xa20 ++ ++#define FSL_QDMA_SQICR_ICEN ++ ++#define FSL_QDMA_CQIDR_CQT 0xff000000 ++#define FSL_QDMA_CQIDR_SQPE 0x800000 ++#define FSL_QDMA_CQIDR_SQT 0x8000 ++ ++#define FSL_QDMA_BCQIER_CQTIE 0x8000 ++#define FSL_QDMA_BCQIER_CQPEIE 0x800000 ++#define FSL_QDMA_BSQICR_ICEN 0x80000000 ++#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16) ++#define FSL_QDMA_CQIER_MEIE 0x80000000 ++#define FSL_QDMA_CQIER_TEIE 0x1 ++#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000 ++ ++#define FSL_QDMA_QUEUE_MAX 8 ++ ++#define FSL_QDMA_BCQMR_EN 0x80000000 ++#define FSL_QDMA_BCQMR_EI 0x40000000 ++#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20) ++#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16) ++ ++#define FSL_QDMA_BCQSR_QF 0x10000 ++#define FSL_QDMA_BCQSR_XOFF 0x1 ++ ++#define FSL_QDMA_BSQMR_EN 0x80000000 ++#define FSL_QDMA_BSQMR_DI 0x40000000 ++#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16) ++ ++#define FSL_QDMA_BSQSR_QE 0x20000 ++ ++#define FSL_QDMA_DMR_DQD 0x40000000 ++#define FSL_QDMA_DSR_DB 0x80000000 ++ ++#define FSL_QDMA_BASE_BUFFER_SIZE 96 ++#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16 ++#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64 ++#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384 ++#define FSL_QDMA_QUEUE_NUM_MAX 8 ++ ++#define FSL_QDMA_CMD_RWTTYPE 0x4 ++#define FSL_QDMA_CMD_LWC 0x2 ++ ++#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28 ++#define FSL_QDMA_CMD_NS_OFFSET 27 ++#define FSL_QDMA_CMD_DQOS_OFFSET 24 ++#define FSL_QDMA_CMD_WTHROTL_OFFSET 20 ++#define FSL_QDMA_CMD_DSEN_OFFSET 19 ++#define FSL_QDMA_CMD_LWC_OFFSET 16 ++ ++#define FSL_QDMA_E_SG_TABLE 1 ++#define FSL_QDMA_E_DATA_BUFFER 0 ++#define FSL_QDMA_F_LAST_ENTRY 1 ++ ++u64 pre_addr, pre_queue; ++ ++struct fsl_qdma_ccdf { ++ u8 status; ++ u32 rev1:22; ++ u32 ser:1; ++ u32 rev2:1; ++ u32 rev3:20; ++ u32 offset:9; ++ u32 format:3; ++ union { ++ struct { ++ u32 addr_lo; /* low 32-bits of 40-bit address */ ++ u32 addr_hi:8; /* high 8-bits of 40-bit address */ ++ u32 rev4:16; ++ u32 queue:3; ++ u32 rev5:3; ++ u32 dd:2; /* dynamic debug */ ++ }; ++ struct { ++ u64 
addr:40;
++			/* More efficient address accessor */
++			u64 __notaddress:24;
++		};
++	};
++} __packed;
++
++struct fsl_qdma_csgf {
++	u32 offset:13;
++	u32 rev1:19;
++	u32 length:30;
++	u32 f:1;
++	u32 e:1;
++	union {
++		struct {
++			u32 addr_lo;	/* low 32-bits of 40-bit address */
++			u32 addr_hi:8;	/* high 8-bits of 40-bit address */
++			u32 rev2:24;
++		};
++		struct {
++			u64 addr:40;
++			/* More efficient address accessor */
++			u64 __notaddress:24;
++		};
++	};
++} __packed;
++
++struct fsl_qdma_sdf {
++	u32 rev3:32;
++	u32 ssd:12;	/* source stride distance */
++	u32 sss:12;	/* source stride size */
++	u32 rev4:8;
++	u32 rev5:32;
++	u32 cmd;
++} __packed;
++
++struct fsl_qdma_ddf {
++	u32 rev1:32;
++	u32 dsd:12;	/* destination stride distance */
++	u32 dss:12;	/* destination stride size */
++	u32 rev2:8;
++	u32 rev3:32;
++	u32 cmd;
++} __packed;
++
++struct fsl_qdma_chan {
++	struct virt_dma_chan vchan;
++	struct virt_dma_desc vdesc;
++	enum dma_status status;
++	u32 slave_id;
++	struct fsl_qdma_engine *qdma;
++	struct fsl_qdma_queue *queue;
++	struct list_head qcomp;
++};
++
++struct fsl_qdma_queue {
++	struct fsl_qdma_ccdf *virt_head;
++	struct fsl_qdma_ccdf *virt_tail;
++	struct list_head comp_used;
++	struct list_head comp_free;
++	struct dma_pool *comp_pool;
++	struct dma_pool *sg_pool;
++	spinlock_t queue_lock;
++	dma_addr_t bus_addr;
++	u32 n_cq;
++	u32 id;
++	struct fsl_qdma_ccdf *cq;
++};
++
++struct fsl_qdma_sg {
++	dma_addr_t bus_addr;
++	void *virt_addr;
++};
++
++struct fsl_qdma_comp {
++	dma_addr_t bus_addr;
++	void *virt_addr;
++	struct fsl_qdma_chan *qchan;
++	struct fsl_qdma_sg *sg_block;
++	struct virt_dma_desc vdesc;
++	struct list_head list;
++	u32 sg_block_src;
++	u32 sg_block_dst;
++};
++
++struct fsl_qdma_engine {
++	struct dma_device dma_dev;
++	void __iomem *ctrl_base;
++	void __iomem *status_base;
++	void __iomem *block_base;
++	u32 n_chans;
++	u32 n_queues;
++	struct mutex fsl_qdma_mutex;
++	int error_irq;
++	int queue_irq;
++	bool big_endian;
++	struct fsl_qdma_queue *queue;
++	struct fsl_qdma_queue *status;
++	struct fsl_qdma_chan chans[];
++};
++
++static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
++{
++	if (qdma->big_endian)
++		return ioread32be(addr);
++	else
++		return ioread32(addr);
++}
++
++static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
++			void __iomem *addr)
++{
++	if (qdma->big_endian)
++		iowrite32be(val, addr);
++	else
++		iowrite32(val, addr);
++}
++
++static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
++{
++	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
++}
++
++static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
++{
++	return container_of(vd, struct fsl_qdma_comp, vdesc);
++}
++
++static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
++{
++	/*
++	 * In QDMA mode, we don't need to do anything.
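++	 * Command descriptors are pre-allocated per queue at probe time
++	 * (see fsl_qdma_pre_request_enqueue_desc()), so there is no
++	 * per-channel resource to set up here.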
++	 */
++	return 0;
++}
++
++static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
++{
++	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
++	unsigned long flags;
++	LIST_HEAD(head);
++
++	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
++	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
++	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
++
++	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
++}
++
++static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
++				      dma_addr_t dst, dma_addr_t src, u32 len)
++{
++	struct fsl_qdma_ccdf *ccdf;
++	struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
++	struct fsl_qdma_sdf *sdf;
++	struct fsl_qdma_ddf *ddf;
++
++	ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
++	csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
++	csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
++	csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
++	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
++	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
++
++	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
++	/* Head Command Descriptor (Frame Descriptor) */
++	ccdf->addr = fsl_comp->bus_addr + 16;
++	ccdf->format = 1; /* Compound S/G format */
++	/* Status notification is enqueued to status queue. */
++	ccdf->ser = 1;
++	/* Compound Command Descriptor (Frame List Table) */
++	csgf_desc->addr = fsl_comp->bus_addr + 64;
++	/* It must be 32 as Compound S/G Descriptor */
++	csgf_desc->length = 32;
++	csgf_src->addr = src;
++	csgf_src->length = len;
++	csgf_dest->addr = dst;
++	csgf_dest->length = len;
++	/* This entry is the last entry. */
++	csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
++	/* Descriptor Buffer */
++	sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
++	ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
++	ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET;
++}
++
++static void fsl_qdma_comp_fill_sg(
++		struct fsl_qdma_comp *fsl_comp,
++		struct scatterlist *dst_sg, unsigned int dst_nents,
++		struct scatterlist *src_sg, unsigned int src_nents)
++{
++	struct fsl_qdma_ccdf *ccdf;
++	struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg;
++	struct fsl_qdma_sdf *sdf;
++	struct fsl_qdma_ddf *ddf;
++	struct fsl_qdma_sg *sg_block, *temp;
++	struct scatterlist *sg;
++	u64 total_src_len = 0;
++	u64 total_dst_len = 0;
++	u32 i;
++
++	ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
++	csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
++	csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
++	csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
++	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
++	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
++
++	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
++	/* Head Command Descriptor (Frame Descriptor) */
++	ccdf->addr = fsl_comp->bus_addr + 16;
++	ccdf->format = 1; /* Compound S/G format */
++	/* Status notification is enqueued to status queue. */
++	ccdf->ser = 1;
++
++	/* Compound Command Descriptor (Frame List Table) */
++	csgf_desc->addr = fsl_comp->bus_addr + 64;
++	/* It must be 32 as Compound S/G Descriptor */
++	csgf_desc->length = 32;
++
++	sg_block = fsl_comp->sg_block;
++	csgf_src->addr = sg_block->bus_addr;
++	/* This entry links to the s/g entry. */
++	csgf_src->e = FSL_QDMA_E_SG_TABLE;
++
++	temp = sg_block + fsl_comp->sg_block_src;
++	csgf_dest->addr = temp->bus_addr;
++	/*
++	 * This entry is the last entry.
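++	 * The 'f' (final) bit set below marks the end of the compound
++	 * frame list table, so the engine stops walking the table here.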
++	 */
++	csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
++	/* This entry links to the s/g entry. */
++	csgf_dest->e = FSL_QDMA_E_SG_TABLE;
++
++	for_each_sg(src_sg, sg, src_nents, i) {
++		temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
++		csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
++			  i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
++		csgf_sg->addr = sg_dma_address(sg);
++		csgf_sg->length = sg_dma_len(sg);
++		total_src_len += sg_dma_len(sg);
++
++		if (i == src_nents - 1)
++			csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
++		if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
++		    FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
++			csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
++				  FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
++			temp = sg_block +
++			       i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
++			csgf_sg->addr = temp->bus_addr;
++			csgf_sg->e = FSL_QDMA_E_SG_TABLE;
++		}
++	}
++
++	sg_block += fsl_comp->sg_block_src;
++	for_each_sg(dst_sg, sg, dst_nents, i) {
++		temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
++		csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
++			  i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
++		csgf_sg->addr = sg_dma_address(sg);
++		csgf_sg->length = sg_dma_len(sg);
++		total_dst_len += sg_dma_len(sg);
++
++		if (i == dst_nents - 1)
++			csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
++		if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
++		    FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
++			csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
++				  FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
++			temp = sg_block +
++			       i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
++			csgf_sg->addr = temp->bus_addr;
++			csgf_sg->e = FSL_QDMA_E_SG_TABLE;
++		}
++	}
++
++	if (total_src_len != total_dst_len)
++		dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
++			"The data length for src and dst doesn't match.\n");
++
++	csgf_src->length = total_src_len;
++	csgf_dest->length = total_dst_len;
++
++	/* Descriptor Buffer */
++	sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
++	ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
++}
++
++/*
++ * Pre-request full command descriptors for enqueue.
++ */
++static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
++{
++	struct fsl_qdma_comp *comp_temp;
++	int i;
++
++	for (i = 0; i < queue->n_cq; i++) {
++		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
++		if (!comp_temp)
++			return -1;
++		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
++						      GFP_NOWAIT,
++						      &comp_temp->bus_addr);
++		if (!comp_temp->virt_addr)
++			return -1;
++		list_add_tail(&comp_temp->list, &queue->comp_free);
++	}
++	return 0;
++}
++
++/*
++ * Request a command descriptor for enqueue.
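++ * A descriptor is taken from the per-queue free list when one is
++ * available and allocated on demand otherwise; scatter-gather table
++ * blocks are also allocated here when dst_nents/src_nents are non-zero.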
++ */ ++static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc( ++ struct fsl_qdma_chan *fsl_chan, ++ unsigned int dst_nents, ++ unsigned int src_nents) ++{ ++ struct fsl_qdma_comp *comp_temp; ++ struct fsl_qdma_sg *sg_block; ++ struct fsl_qdma_queue *queue = fsl_chan->queue; ++ unsigned long flags; ++ unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i; ++ ++ spin_lock_irqsave(&queue->queue_lock, flags); ++ if (list_empty(&queue->comp_free)) { ++ spin_unlock_irqrestore(&queue->queue_lock, flags); ++ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL); ++ if (!comp_temp) ++ return NULL; ++ comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool, ++ GFP_NOWAIT, ++ &comp_temp->bus_addr); ++ if (!comp_temp->virt_addr) ++ return NULL; ++ } else { ++ comp_temp = list_first_entry(&queue->comp_free, ++ struct fsl_qdma_comp, ++ list); ++ list_del(&comp_temp->list); ++ spin_unlock_irqrestore(&queue->queue_lock, flags); ++ } ++ ++ if (dst_nents != 0) ++ dst_sg_entry_block = dst_nents / ++ (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1; ++ else ++ dst_sg_entry_block = 0; ++ ++ if (src_nents != 0) ++ src_sg_entry_block = src_nents / ++ (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1; ++ else ++ src_sg_entry_block = 0; ++ ++ sg_entry_total = dst_sg_entry_block + src_sg_entry_block; ++ if (sg_entry_total) { ++ sg_block = kzalloc(sizeof(*sg_block) * ++ sg_entry_total, ++ GFP_KERNEL); ++ if (!sg_block) ++ return NULL; ++ comp_temp->sg_block = sg_block; ++ for (i = 0; i < sg_entry_total; i++) { ++ sg_block->virt_addr = dma_pool_alloc(queue->sg_pool, ++ GFP_NOWAIT, ++ &sg_block->bus_addr); ++ memset(sg_block->virt_addr, 0, ++ FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16); ++ sg_block++; ++ } ++ } ++ ++ comp_temp->sg_block_src = src_sg_entry_block; ++ comp_temp->sg_block_dst = dst_sg_entry_block; ++ comp_temp->qchan = fsl_chan; ++ ++ return comp_temp; ++} ++ ++static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources( ++ struct platform_device *pdev, ++ unsigned int queue_num) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct fsl_qdma_queue *queue_head, *queue_temp; ++ int ret, len, i; ++ unsigned int queue_size[FSL_QDMA_QUEUE_MAX]; ++ ++ if (queue_num > FSL_QDMA_QUEUE_MAX) ++ queue_num = FSL_QDMA_QUEUE_MAX; ++ len = sizeof(*queue_head) * queue_num; ++ queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); ++ if (!queue_head) ++ return NULL; ++ ++ ret = of_property_read_u32_array(np, "queue-sizes", queue_size, ++ queue_num); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't get queue-sizes.\n"); ++ return NULL; ++ } ++ ++ for (i = 0; i < queue_num; i++) { ++ if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ++ || queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { ++ dev_err(&pdev->dev, "Get wrong queue-sizes.\n"); ++ return NULL; ++ } ++ queue_temp = queue_head + i; ++ queue_temp->cq = dma_alloc_coherent(&pdev->dev, ++ sizeof(struct fsl_qdma_ccdf) * ++ queue_size[i], ++ &queue_temp->bus_addr, ++ GFP_KERNEL); ++ if (!queue_temp->cq) ++ return NULL; ++ queue_temp->n_cq = queue_size[i]; ++ queue_temp->id = i; ++ queue_temp->virt_head = queue_temp->cq; ++ queue_temp->virt_tail = queue_temp->cq; ++ /* ++ * The dma pool for queue command buffer ++ */ ++ queue_temp->comp_pool = dma_pool_create("comp_pool", ++ &pdev->dev, ++ FSL_QDMA_BASE_BUFFER_SIZE, ++ 16, 0); ++ if (!queue_temp->comp_pool) { ++ dma_free_coherent(&pdev->dev, ++ sizeof(struct fsl_qdma_ccdf) * ++ queue_size[i], ++ queue_temp->cq, ++ queue_temp->bus_addr); ++ return NULL; ++ } ++ /* ++ * The dma pool for queue command buffer ++ */ ++ 
queue_temp->sg_pool = dma_pool_create("sg_pool", ++ &pdev->dev, ++ FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16, ++ 64, 0); ++ if (!queue_temp->sg_pool) { ++ dma_free_coherent(&pdev->dev, ++ sizeof(struct fsl_qdma_ccdf) * ++ queue_size[i], ++ queue_temp->cq, ++ queue_temp->bus_addr); ++ dma_pool_destroy(queue_temp->comp_pool); ++ return NULL; ++ } ++ /* ++ * List for queue command buffer ++ */ ++ INIT_LIST_HEAD(&queue_temp->comp_used); ++ INIT_LIST_HEAD(&queue_temp->comp_free); ++ spin_lock_init(&queue_temp->queue_lock); ++ } ++ ++ return queue_head; ++} ++ ++static struct fsl_qdma_queue *fsl_qdma_prep_status_queue( ++ struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct fsl_qdma_queue *status_head; ++ unsigned int status_size; ++ int ret; ++ ++ ret = of_property_read_u32(np, "status-sizes", &status_size); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't get status-sizes.\n"); ++ return NULL; ++ } ++ if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ++ || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { ++ dev_err(&pdev->dev, "Get wrong status_size.\n"); ++ return NULL; ++ } ++ status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head), ++ GFP_KERNEL); ++ if (!status_head) ++ return NULL; ++ ++ /* ++ * Buffer for queue command ++ */ ++ status_head->cq = dma_alloc_coherent(&pdev->dev, ++ sizeof(struct fsl_qdma_ccdf) * ++ status_size, ++ &status_head->bus_addr, ++ GFP_KERNEL); ++ if (!status_head->cq) ++ return NULL; ++ status_head->n_cq = status_size; ++ status_head->virt_head = status_head->cq; ++ status_head->virt_tail = status_head->cq; ++ status_head->comp_pool = NULL; ++ ++ return status_head; ++} ++ ++static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma) ++{ ++ void __iomem *ctrl = fsl_qdma->ctrl_base; ++ void __iomem *block = fsl_qdma->block_base; ++ int i, count = 5; ++ u32 reg; ++ ++ /* Disable the command queue and wait for idle state. */ ++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); ++ reg |= FSL_QDMA_DMR_DQD; ++ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); ++ for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++) ++ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i)); ++ ++ while (1) { ++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR); ++ if (!(reg & FSL_QDMA_DSR_DB)) ++ break; ++ if (count-- < 0) ++ return -EBUSY; ++ udelay(100); ++ } ++ ++ /* Disable status queue. */ ++ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR); ++ ++ /* ++ * Clear the command queue interrupt detect register for all queues. 
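++	 * The detect bits appear to be write-one-to-clear, so the
++	 * all-ones write below acknowledges any interrupts raised
++	 * while the queues were being drained.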
++ */ ++ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0)); ++ ++ return 0; ++} ++ ++static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma) ++{ ++ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; ++ struct fsl_qdma_queue *fsl_status = fsl_qdma->status; ++ struct fsl_qdma_queue *temp_queue; ++ struct fsl_qdma_comp *fsl_comp; ++ struct fsl_qdma_ccdf *status_addr; ++ struct fsl_qdma_csgf *csgf_src; ++ void __iomem *block = fsl_qdma->block_base; ++ u32 reg, i; ++ bool duplicate, duplicate_handle; ++ ++ while (1) { ++ duplicate = 0; ++ duplicate_handle = 0; ++ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR); ++ if (reg & FSL_QDMA_BSQSR_QE) ++ return 0; ++ status_addr = fsl_status->virt_head; ++ if (status_addr->queue == pre_queue && ++ status_addr->addr == pre_addr) ++ duplicate = 1; ++ ++ i = status_addr->queue; ++ pre_queue = status_addr->queue; ++ pre_addr = status_addr->addr; ++ temp_queue = fsl_queue + i; ++ spin_lock(&temp_queue->queue_lock); ++ if (list_empty(&temp_queue->comp_used)) { ++ if (duplicate) ++ duplicate_handle = 1; ++ else { ++ spin_unlock(&temp_queue->queue_lock); ++ return -1; ++ } ++ } else { ++ fsl_comp = list_first_entry(&temp_queue->comp_used, ++ struct fsl_qdma_comp, ++ list); ++ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr ++ + 2; ++ if (fsl_comp->bus_addr + 16 != ++ (dma_addr_t)status_addr->addr) { ++ if (duplicate) ++ duplicate_handle = 1; ++ else { ++ spin_unlock(&temp_queue->queue_lock); ++ return -1; ++ } ++ } ++ } ++ ++ if (duplicate_handle) { ++ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); ++ reg |= FSL_QDMA_BSQMR_DI; ++ status_addr->addr = 0x0; ++ fsl_status->virt_head++; ++ if (fsl_status->virt_head == fsl_status->cq ++ + fsl_status->n_cq) ++ fsl_status->virt_head = fsl_status->cq; ++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); ++ spin_unlock(&temp_queue->queue_lock); ++ continue; ++ } ++ list_del(&fsl_comp->list); ++ ++ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); ++ reg |= FSL_QDMA_BSQMR_DI; ++ status_addr->addr = 0x0; ++ fsl_status->virt_head++; ++ if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq) ++ fsl_status->virt_head = fsl_status->cq; ++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); ++ spin_unlock(&temp_queue->queue_lock); ++ ++ spin_lock(&fsl_comp->qchan->vchan.lock); ++ vchan_cookie_complete(&fsl_comp->vdesc); ++ fsl_comp->qchan->status = DMA_COMPLETE; ++ spin_unlock(&fsl_comp->qchan->vchan.lock); ++ } ++ return 0; ++} ++ ++static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id) ++{ ++ struct fsl_qdma_engine *fsl_qdma = dev_id; ++ unsigned int intr; ++ void __iomem *status = fsl_qdma->status_base; ++ ++ intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR); ++ ++ if (intr) ++ dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n"); ++ ++ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR); ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id) ++{ ++ struct fsl_qdma_engine *fsl_qdma = dev_id; ++ unsigned int intr, reg; ++ void __iomem *block = fsl_qdma->block_base; ++ void __iomem *ctrl = fsl_qdma->ctrl_base; ++ ++ intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0)); ++ ++ if ((intr & FSL_QDMA_CQIDR_SQT) != 0) ++ intr = fsl_qdma_queue_transfer_complete(fsl_qdma); ++ ++ if (intr != 0) { ++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); ++ reg |= FSL_QDMA_DMR_DQD; ++ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); ++ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0)); ++ 
dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
++	}
++
++	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
++
++	return IRQ_HANDLED;
++}
++
++static int
++fsl_qdma_irq_init(struct platform_device *pdev,
++		  struct fsl_qdma_engine *fsl_qdma)
++{
++	int ret;
++
++	fsl_qdma->error_irq = platform_get_irq_byname(pdev,
++						      "qdma-error");
++	if (fsl_qdma->error_irq < 0) {
++		dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
++		return fsl_qdma->error_irq;
++	}
++
++	fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
++	if (fsl_qdma->queue_irq < 0) {
++		dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
++		return fsl_qdma->queue_irq;
++	}
++
++	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
++			fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
++	if (ret) {
++		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
++		return ret;
++	}
++	ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
++			fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
++	if (ret) {
++		dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
++{
++	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
++	struct fsl_qdma_queue *temp;
++	void __iomem *ctrl = fsl_qdma->ctrl_base;
++	void __iomem *status = fsl_qdma->status_base;
++	void __iomem *block = fsl_qdma->block_base;
++	int i, ret;
++	u32 reg;
++
++	/* Try to halt the qDMA engine first. */
++	ret = fsl_qdma_halt(fsl_qdma);
++	if (ret) {
++		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
++		return ret;
++	}
++
++	/*
++	 * Clear the command queue interrupt detect register for all queues.
++	 */
++	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
++
++	for (i = 0; i < fsl_qdma->n_queues; i++) {
++		temp = fsl_queue + i;
++		/*
++		 * Initialize Command Queue registers to point to the first
++		 * command descriptor in memory.
++		 * Dequeue Pointer Address Registers
++		 * Enqueue Pointer Address Registers
++		 */
++		qdma_writel(fsl_qdma, temp->bus_addr,
++			    block + FSL_QDMA_BCQDPA_SADDR(i));
++		qdma_writel(fsl_qdma, temp->bus_addr,
++			    block + FSL_QDMA_BCQEPA_SADDR(i));
++
++		/* Initialize the queue mode. */
++		reg = FSL_QDMA_BCQMR_EN;
++		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
++		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
++		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
++	}
++
++	/*
++	 * Workaround for erratum ERR010812: we must enable XOFF to
++	 * avoid enqueue rejections. Set SQCCMR ENTER_WM to 0x20.
++	 */
++	qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
++		    block + FSL_QDMA_SQCCMR);
++	/*
++	 * Initialize status queue registers to point to the first
++	 * command descriptor in memory.
++	 * Dequeue Pointer Address Registers
++	 * Enqueue Pointer Address Registers
++	 */
++	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
++		    block + FSL_QDMA_SQEPAR);
++	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
++		    block + FSL_QDMA_SQDPAR);
++	/* Initialize status queue interrupt. */
++	qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
++		    block + FSL_QDMA_BCQIER(0));
++	qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
++		    | 0x8000,
++		    block + FSL_QDMA_BSQICR);
++	qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
++		    block + FSL_QDMA_CQIER);
++	/*
++	 * Initialize the controller interrupt registers.
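++	 * Writing all-ones to DEDR clears any stale error-detect status,
++	 * and the matching DEIER write enables all error interrupt sources.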
*/ ++ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR); ++ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER); ++ ++ /* Initialize the status queue mode. */ ++ reg = FSL_QDMA_BSQMR_EN; ++ reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6); ++ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); ++ ++ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); ++ reg &= ~FSL_QDMA_DMR_DQD; ++ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); ++ ++ return 0; ++} ++ ++static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg( ++ struct dma_chan *chan, ++ struct scatterlist *dst_sg, unsigned int dst_nents, ++ struct scatterlist *src_sg, unsigned int src_nents, ++ unsigned long flags) ++{ ++ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); ++ struct fsl_qdma_comp *fsl_comp; ++ ++ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, ++ dst_nents, ++ src_nents); ++ fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents); ++ ++ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags); ++} ++ ++static struct dma_async_tx_descriptor * ++fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, ++ dma_addr_t src, size_t len, unsigned long flags) ++{ ++ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); ++ struct fsl_qdma_comp *fsl_comp; ++ ++ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0); ++ fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len); ++ ++ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags); ++} ++ ++static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan) ++{ ++ void __iomem *block = fsl_chan->qdma->block_base; ++ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; ++ struct fsl_qdma_comp *fsl_comp; ++ struct virt_dma_desc *vdesc; ++ u32 reg; ++ ++ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id)); ++ if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF)) ++ return; ++ vdesc = vchan_next_desc(&fsl_chan->vchan); ++ if (!vdesc) ++ return; ++ list_del(&vdesc->node); ++ fsl_comp = to_fsl_qdma_comp(vdesc); ++ ++ memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16); ++ if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) ++ fsl_queue->virt_head = fsl_queue->cq; ++ ++ list_add_tail(&fsl_comp->list, &fsl_queue->comp_used); ++ barrier(); ++ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id)); ++ reg |= FSL_QDMA_BCQMR_EI; ++ qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id)); ++ fsl_chan->status = DMA_IN_PROGRESS; ++} ++ ++static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan, ++ dma_cookie_t cookie, struct dma_tx_state *txstate) ++{ ++ return dma_cookie_status(chan, cookie, txstate); ++} ++ ++static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc) ++{ ++ struct fsl_qdma_comp *fsl_comp; ++ struct fsl_qdma_queue *fsl_queue; ++ struct fsl_qdma_sg *sg_block; ++ unsigned long flags; ++ unsigned int i; ++ ++ fsl_comp = to_fsl_qdma_comp(vdesc); ++ fsl_queue = fsl_comp->qchan->queue; ++ ++ if (fsl_comp->sg_block) { ++ for (i = 0; i < fsl_comp->sg_block_src + ++ fsl_comp->sg_block_dst; i++) { ++ sg_block = fsl_comp->sg_block + i; ++ dma_pool_free(fsl_queue->sg_pool, ++ sg_block->virt_addr, ++ sg_block->bus_addr); ++ } ++ kfree(fsl_comp->sg_block); ++ } ++ ++ spin_lock_irqsave(&fsl_queue->queue_lock, flags); ++ list_add_tail(&fsl_comp->list, &fsl_queue->comp_free); ++ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); ++} ++ ++static void fsl_qdma_issue_pending(struct dma_chan *chan) ++{ ++ struct fsl_qdma_chan *fsl_chan = 
to_fsl_qdma_chan(chan); ++ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&fsl_queue->queue_lock, flags); ++ spin_lock(&fsl_chan->vchan.lock); ++ if (vchan_issue_pending(&fsl_chan->vchan)) ++ fsl_qdma_enqueue_desc(fsl_chan); ++ spin_unlock(&fsl_chan->vchan.lock); ++ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); ++} ++ ++static int fsl_qdma_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct fsl_qdma_engine *fsl_qdma; ++ struct fsl_qdma_chan *fsl_chan; ++ struct resource *res; ++ unsigned int len, chans, queues; ++ int ret, i; ++ ++ ret = of_property_read_u32(np, "channels", &chans); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't get channels.\n"); ++ return ret; ++ } ++ ++ len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans; ++ fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); ++ if (!fsl_qdma) ++ return -ENOMEM; ++ ++ ret = of_property_read_u32(np, "queues", &queues); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't get queues.\n"); ++ return ret; ++ } ++ ++ fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues); ++ if (!fsl_qdma->queue) ++ return -ENOMEM; ++ ++ fsl_qdma->status = fsl_qdma_prep_status_queue(pdev); ++ if (!fsl_qdma->status) ++ return -ENOMEM; ++ ++ fsl_qdma->n_chans = chans; ++ fsl_qdma->n_queues = queues; ++ mutex_init(&fsl_qdma->fsl_qdma_mutex); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(fsl_qdma->ctrl_base)) ++ return PTR_ERR(fsl_qdma->ctrl_base); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(fsl_qdma->status_base)) ++ return PTR_ERR(fsl_qdma->status_base); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(fsl_qdma->block_base)) ++ return PTR_ERR(fsl_qdma->block_base); ++ ++ ret = fsl_qdma_irq_init(pdev, fsl_qdma); ++ if (ret) ++ return ret; ++ ++ fsl_qdma->big_endian = of_property_read_bool(np, "big-endian"); ++ INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels); ++ for (i = 0; i < fsl_qdma->n_chans; i++) { ++ struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i]; ++ ++ fsl_chan->qdma = fsl_qdma; ++ fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues; ++ fsl_chan->vchan.desc_free = fsl_qdma_free_desc; ++ INIT_LIST_HEAD(&fsl_chan->qcomp); ++ vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev); ++ } ++ for (i = 0; i < fsl_qdma->n_queues; i++) ++ fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i); ++ ++ dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask); ++ dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask); ++ ++ fsl_qdma->dma_dev.dev = &pdev->dev; ++ fsl_qdma->dma_dev.device_alloc_chan_resources ++ = fsl_qdma_alloc_chan_resources; ++ fsl_qdma->dma_dev.device_free_chan_resources ++ = fsl_qdma_free_chan_resources; ++ fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status; ++ fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy; ++ fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg; ++ fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending; ++ ++ dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); ++ ++ platform_set_drvdata(pdev, fsl_qdma); ++ ++ ret = dma_async_device_register(&fsl_qdma->dma_dev); ++ if (ret) { ++ dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n"); ++ return ret; ++ } ++ ++ ret = fsl_qdma_reg_init(fsl_qdma); ++ if (ret) { ++ 
dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n"); ++ return ret; ++ } ++ ++ ++ return 0; ++} ++ ++static int fsl_qdma_remove(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev); ++ struct fsl_qdma_queue *queue_temp; ++ struct fsl_qdma_queue *status = fsl_qdma->status; ++ struct fsl_qdma_comp *comp_temp, *_comp_temp; ++ int i; ++ ++ of_dma_controller_free(np); ++ dma_async_device_unregister(&fsl_qdma->dma_dev); ++ ++ /* Free descriptor areas */ ++ for (i = 0; i < fsl_qdma->n_queues; i++) { ++ queue_temp = fsl_qdma->queue + i; ++ list_for_each_entry_safe(comp_temp, _comp_temp, ++ &queue_temp->comp_used, list) { ++ dma_pool_free(queue_temp->comp_pool, ++ comp_temp->virt_addr, ++ comp_temp->bus_addr); ++ list_del(&comp_temp->list); ++ kfree(comp_temp); ++ } ++ list_for_each_entry_safe(comp_temp, _comp_temp, ++ &queue_temp->comp_free, list) { ++ dma_pool_free(queue_temp->comp_pool, ++ comp_temp->virt_addr, ++ comp_temp->bus_addr); ++ list_del(&comp_temp->list); ++ kfree(comp_temp); ++ } ++ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) * ++ queue_temp->n_cq, queue_temp->cq, ++ queue_temp->bus_addr); ++ dma_pool_destroy(queue_temp->comp_pool); ++ } ++ ++ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) * ++ status->n_cq, status->cq, status->bus_addr); ++ return 0; ++} ++ ++static const struct of_device_id fsl_qdma_dt_ids[] = { ++ { .compatible = "fsl,ls1021a-qdma", }, ++ { /* sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids); ++ ++static struct platform_driver fsl_qdma_driver = { ++ .driver = { ++ .name = "fsl-qdma", ++ .owner = THIS_MODULE, ++ .of_match_table = fsl_qdma_dt_ids, ++ }, ++ .probe = fsl_qdma_probe, ++ .remove = fsl_qdma_remove, ++}; ++ ++static int __init fsl_qdma_init(void) ++{ ++ return platform_driver_register(&fsl_qdma_driver); ++} ++subsys_initcall(fsl_qdma_init); ++ ++static void __exit fsl_qdma_exit(void) ++{ ++ platform_driver_unregister(&fsl_qdma_driver); ++} ++module_exit(fsl_qdma_exit); ++ ++MODULE_ALIAS("platform:fsl-qdma"); ++MODULE_DESCRIPTION("Freescale qDMA engine driver"); ++MODULE_LICENSE("GPL v2"); +-- +2.14.1 + |