From 24de7c29e5f70863a2962de79521b60bce4dce2f Mon Sep 17 00:00:00 2001 From: Piotr Dymacz Date: Sun, 3 Mar 2019 20:23:31 +0100 Subject: ipq40xx: backport I2C QUP driver changes from 4.17 Backport below changes for I2C QUP driver from v4.17: 0668bc44a426 i2c: qup: fix copyrights and update to SPDX identifier 7239872fb340 i2c: qup: fixed releasing dma without flush operation completion eb422b539c1f i2c: qup: minor code reorganization for use_dma 6d5f37f166bb i2c: qup: remove redundant variables for BAM SG count c5adc0fa63a9 i2c: qup: schedule EOT and FLUSH tags at the end of transfer 7e6c35fe602d i2c: qup: fix the transfer length for BAM RX EOT FLUSH tags 3f450d3eea14 i2c: qup: proper error handling for i2c error in BAM mode 08f15963bc75 i2c: qup: use the complete transfer length to choose DMA mode ecb6e1e5f435 i2c: qup: change completion timeout according to transfer length 6f2f0f6465ac i2c: qup: fix buffer overflow for multiple msg of maximum xfer len f7714b4e451b i2c: qup: send NACK for last read sub transfers fbfab1ab0658 i2c: qup: reorganization of driver code to remove polling for qup v1 7545c7dba169 i2c: qup: reorganization of driver code to remove polling for qup v2 This fixes various I2C issues observed on AP120C-AC board equipped with Atmel/Microchip AT97SC3205T TPM module. Tested-by: Christian Lamparter Signed-off-by: Piotr Dymacz --- ...-copyrights-and-update-to-SPDX-identifier.patch | 36 + ...xed-releasing-dma-without-flush-operation.patch | 44 + ...qup-minor-code-reorganization-for-use_dma.patch | 76 ++ ...move-redundant-variables-for-BAM-SG-count.patch | 174 +++ ...edule-EOT-and-FLUSH-tags-at-the-end-of-tr.patch | 126 +++ ...-the-transfer-length-for-BAM-RX-EOT-FLUSH.patch | 33 + ...per-error-handling-for-i2c-error-in-BAM-m.patch | 90 ++ ...-the-complete-transfer-length-to-choose-D.patch | 54 + ...nge-completion-timeout-according-to-trans.patch | 61 ++ ...-buffer-overflow-for-multiple-msg-of-maxi.patch | 311 ++++++ ...qup-send-NACK-for-last-read-sub-transfers.patch | 43 + ...rganization-of-driver-code-to-remove-poll.patch | 579 ++++++++++ ...rganization-of-driver-code-to-remove-poll.patch | 1154 ++++++++++++++++++++ 13 files changed, 2781 insertions(+) create mode 100644 target/linux/ipq40xx/patches-4.14/088-0001-i2c-qup-fix-copyrights-and-update-to-SPDX-identifier.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0002-i2c-qup-fixed-releasing-dma-without-flush-operation.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0003-i2c-qup-minor-code-reorganization-for-use_dma.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0004-i2c-qup-remove-redundant-variables-for-BAM-SG-count.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0005-i2c-qup-schedule-EOT-and-FLUSH-tags-at-the-end-of-tr.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0006-i2c-qup-fix-the-transfer-length-for-BAM-RX-EOT-FLUSH.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0007-i2c-qup-proper-error-handling-for-i2c-error-in-BAM-m.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0008-i2c-qup-use-the-complete-transfer-length-to-choose-D.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0009-i2c-qup-change-completion-timeout-according-to-trans.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0010-i2c-qup-fix-buffer-overflow-for-multiple-msg-of-maxi.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0011-i2c-qup-send-NACK-for-last-read-sub-transfers.patch create mode 100644 
target/linux/ipq40xx/patches-4.14/088-0012-i2c-qup-reorganization-of-driver-code-to-remove-poll.patch create mode 100644 target/linux/ipq40xx/patches-4.14/088-0013-i2c-qup-reorganization-of-driver-code-to-remove-poll.patch (limited to 'target/linux/ipq40xx') diff --git a/target/linux/ipq40xx/patches-4.14/088-0001-i2c-qup-fix-copyrights-and-update-to-SPDX-identifier.patch b/target/linux/ipq40xx/patches-4.14/088-0001-i2c-qup-fix-copyrights-and-update-to-SPDX-identifier.patch new file mode 100644 index 0000000000..b2b317245f --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0001-i2c-qup-fix-copyrights-and-update-to-SPDX-identifier.patch @@ -0,0 +1,36 @@ +From 0668bc44a42672626666e4f66aa1f2c22528e8a5 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:50 +0530 +Subject: [PATCH 01/13] i2c: qup: fix copyrights and update to SPDX identifier + +The file has been updated from 2016 to 2018 so fixed the +copyright years. + +Signed-off-by: Abhishek Sahu +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 13 ++----------- + 1 file changed, 2 insertions(+), 11 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -1,17 +1,8 @@ ++// SPDX-License-Identifier: GPL-2.0 + /* +- * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2009-2013, 2016-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, Sony Mobile Communications AB. + * +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License version 2 and +- * only version 2 as published by the Free Software Foundation. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * + */ + + #include diff --git a/target/linux/ipq40xx/patches-4.14/088-0002-i2c-qup-fixed-releasing-dma-without-flush-operation.patch b/target/linux/ipq40xx/patches-4.14/088-0002-i2c-qup-fixed-releasing-dma-without-flush-operation.patch new file mode 100644 index 0000000000..25d0366ec6 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0002-i2c-qup-fixed-releasing-dma-without-flush-operation.patch @@ -0,0 +1,44 @@ +From 7239872fb3400b21a8f5547257f9f86455867bd6 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:51 +0530 +Subject: [PATCH 02/13] i2c: qup: fixed releasing dma without flush operation + completion +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The QUP BSLP BAM generates the following error sometimes if the +current I2C DMA transfer fails and the flush operation has been +scheduled + + “bam-dma-engine 7884000.dma: Cannot free busy channel” + +If any I2C error comes during BAM DMA transfer, then the QUP I2C +interrupt will be generated and the flush operation will be +carried out to make I2C consume all scheduled DMA transfer. +Currently, the same completion structure is being used for BAM +transfer which has already completed without reinit. It will make +flush operation wait_for_completion_timeout completed immediately +and will proceed for freeing the DMA resources where the +descriptors are still in process. 
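
As an aside, the race described above is the classic pattern of reusing a struct completion without re-arming it. A minimal, schematic kernel-style C sketch of that pattern (illustrative only, not taken from the quoted patch; the context struct and the flush scheduling are placeholders):

  #include <linux/completion.h>
  #include <linux/errno.h>
  #include <linux/jiffies.h>

  struct xfer_ctx {
          struct completion done;         /* signalled from the DMA/IRQ callback */
  };

  static int wait_for_flush(struct xfer_ctx *ctx)
  {
          /*
           * The failed transfer already fired complete(&ctx->done), so the
           * completion is still marked done.  Without re-arming it here,
           * wait_for_completion_timeout() returns at once and the caller
           * frees DMA resources while descriptors are still being flushed.
           */
          reinit_completion(&ctx->done);

          /* ...schedule the BAM flush here, then wait for its callback... */

          if (!wait_for_completion_timeout(&ctx->done, HZ))
                  return -ETIMEDOUT;

          return 0;
  }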
+ +Signed-off-by: Abhishek Sahu +Acked-by: Sricharan R +Reviewed-by: Austin Christ +Reviewed-by: Andy Gross +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -835,6 +835,8 @@ static int qup_i2c_bam_do_xfer(struct qu + } + + if (ret || qup->bus_err || qup->qup_err) { ++ reinit_completion(&qup->xfer); ++ + if (qup_i2c_change_state(qup, QUP_RUN_STATE)) { + dev_err(qup->dev, "change to run state timed out"); + goto desc_err; diff --git a/target/linux/ipq40xx/patches-4.14/088-0003-i2c-qup-minor-code-reorganization-for-use_dma.patch b/target/linux/ipq40xx/patches-4.14/088-0003-i2c-qup-minor-code-reorganization-for-use_dma.patch new file mode 100644 index 0000000000..a6cbfcf2b0 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0003-i2c-qup-minor-code-reorganization-for-use_dma.patch @@ -0,0 +1,76 @@ +From eb422b539c1f39faf576826b54be93e84d9cb32a Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:52 +0530 +Subject: [PATCH 03/13] i2c: qup: minor code reorganization for use_dma + +1. Assigns use_dma in qup_dev structure itself which will + help in subsequent patches to determine the mode in IRQ handler. +2. Does minor code reorganization for loops to reduce the + unnecessary comparison and assignment. + +Signed-off-by: Abhishek Sahu +Reviewed-by: Austin Christ +Reviewed-by: Andy Gross +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 19 +++++++++++-------- + 1 file changed, 11 insertions(+), 8 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -181,6 +181,8 @@ struct qup_i2c_dev { + + /* dma parameters */ + bool is_dma; ++ /* To check if the current transfer is using DMA */ ++ bool use_dma; + struct dma_pool *dpool; + struct qup_i2c_tag start_tag; + struct qup_i2c_bam brx; +@@ -1288,7 +1290,7 @@ static int qup_i2c_xfer_v2(struct i2c_ad + int num) + { + struct qup_i2c_dev *qup = i2c_get_adapdata(adap); +- int ret, len, idx = 0, use_dma = 0; ++ int ret, len, idx = 0; + + qup->bus_err = 0; + qup->qup_err = 0; +@@ -1317,13 +1319,12 @@ static int qup_i2c_xfer_v2(struct i2c_ad + len = (msgs[idx].len > qup->out_fifo_sz) || + (msgs[idx].len > qup->in_fifo_sz); + +- if ((!is_vmalloc_addr(msgs[idx].buf)) && len) { +- use_dma = 1; +- } else { +- use_dma = 0; ++ if (is_vmalloc_addr(msgs[idx].buf) || !len) + break; +- } + } ++ ++ if (idx == num) ++ qup->use_dma = true; + } + + idx = 0; +@@ -1347,15 +1348,17 @@ static int qup_i2c_xfer_v2(struct i2c_ad + + reinit_completion(&qup->xfer); + +- if (use_dma) { ++ if (qup->use_dma) { + ret = qup_i2c_bam_xfer(adap, &msgs[idx], num); ++ qup->use_dma = false; ++ break; + } else { + if (msgs[idx].flags & I2C_M_RD) + ret = qup_i2c_read_one_v2(qup, &msgs[idx]); + else + ret = qup_i2c_write_one_v2(qup, &msgs[idx]); + } +- } while ((idx++ < (num - 1)) && !use_dma && !ret); ++ } while ((idx++ < (num - 1)) && !ret); + + if (!ret) + ret = qup_i2c_change_state(qup, QUP_RESET_STATE); diff --git a/target/linux/ipq40xx/patches-4.14/088-0004-i2c-qup-remove-redundant-variables-for-BAM-SG-count.patch b/target/linux/ipq40xx/patches-4.14/088-0004-i2c-qup-remove-redundant-variables-for-BAM-SG-count.patch new file mode 100644 index 0000000000..e18af41789 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0004-i2c-qup-remove-redundant-variables-for-BAM-SG-count.patch @@ -0,0 +1,174 @@ +From 6d5f37f166bb07b04b4d42e9d1f5427b7931cd3c Mon Sep 17 00:00:00 2001 +From: 
Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:53 +0530 +Subject: [PATCH 04/13] i2c: qup: remove redundant variables for BAM SG count + +The rx_nents and tx_nents are redundant. rx_buf and tx_buf can +be used for total number of SG entries. Since rx_buf and tx_buf +give the impression that it is buffer instead of count so rename +it to tx_cnt and rx_cnt for giving it more meaningful variable +name. + +Signed-off-by: Abhishek Sahu +Reviewed-by: Austin Christ +Reviewed-by: Andy Gross +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 42 ++++++++++++++++-------------------- + 1 file changed, 18 insertions(+), 24 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -683,8 +683,8 @@ static int qup_i2c_bam_do_xfer(struct qu + struct dma_async_tx_descriptor *txd, *rxd = NULL; + int ret = 0, idx = 0, limit = QUP_READ_LIMIT; + dma_cookie_t cookie_rx, cookie_tx; +- u32 rx_nents = 0, tx_nents = 0, len, blocks, rem; +- u32 i, tlen, tx_len, tx_buf = 0, rx_buf = 0, off = 0; ++ u32 len, blocks, rem; ++ u32 i, tlen, tx_len, tx_cnt = 0, rx_cnt = 0, off = 0; + u8 *tags; + + while (idx < num) { +@@ -698,9 +698,6 @@ static int qup_i2c_bam_do_xfer(struct qu + rem = msg->len - (blocks - 1) * limit; + + if (msg->flags & I2C_M_RD) { +- rx_nents += (blocks * 2) + 1; +- tx_nents += 1; +- + while (qup->blk.pos < blocks) { + tlen = (i == (blocks - 1)) ? rem : limit; + tags = &qup->start_tag.start[off + len]; +@@ -708,14 +705,14 @@ static int qup_i2c_bam_do_xfer(struct qu + qup->blk.data_len -= tlen; + + /* scratch buf to read the start and len tags */ +- ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++], ++ ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++], + &qup->brx.tag.start[0], + 2, qup, DMA_FROM_DEVICE); + + if (ret) + return ret; + +- ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++], ++ ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++], + &msg->buf[limit * i], + tlen, qup, + DMA_FROM_DEVICE); +@@ -725,7 +722,7 @@ static int qup_i2c_bam_do_xfer(struct qu + i++; + qup->blk.pos = i; + } +- ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++], ++ ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], + &qup->start_tag.start[off], + len, qup, DMA_TO_DEVICE); + if (ret) +@@ -733,28 +730,26 @@ static int qup_i2c_bam_do_xfer(struct qu + + off += len; + /* scratch buf to read the BAM EOT and FLUSH tags */ +- ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++], ++ ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++], + &qup->brx.tag.start[0], + 2, qup, DMA_FROM_DEVICE); + if (ret) + return ret; + } else { +- tx_nents += (blocks * 2); +- + while (qup->blk.pos < blocks) { + tlen = (i == (blocks - 1)) ? 
rem : limit; + tags = &qup->start_tag.start[off + tx_len]; + len = qup_i2c_set_tags(tags, qup, msg, 1); + qup->blk.data_len -= tlen; + +- ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++], ++ ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], + tags, len, + qup, DMA_TO_DEVICE); + if (ret) + return ret; + + tx_len += len; +- ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++], ++ ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], + &msg->buf[limit * i], + tlen, qup, DMA_TO_DEVICE); + if (ret) +@@ -766,26 +761,25 @@ static int qup_i2c_bam_do_xfer(struct qu + + if (idx == (num - 1)) { + len = 1; +- if (rx_nents) { ++ if (rx_cnt) { + qup->btx.tag.start[0] = + QUP_BAM_INPUT_EOT; + len++; + } + qup->btx.tag.start[len - 1] = + QUP_BAM_FLUSH_STOP; +- ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++], ++ ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], + &qup->btx.tag.start[0], + len, qup, DMA_TO_DEVICE); + if (ret) + return ret; +- tx_nents += 1; + } + } + idx++; + msg++; + } + +- txd = dmaengine_prep_slave_sg(qup->btx.dma, qup->btx.sg, tx_nents, ++ txd = dmaengine_prep_slave_sg(qup->btx.dma, qup->btx.sg, tx_cnt, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_PREP_FENCE); + if (!txd) { +@@ -794,7 +788,7 @@ static int qup_i2c_bam_do_xfer(struct qu + goto desc_err; + } + +- if (!rx_nents) { ++ if (!rx_cnt) { + txd->callback = qup_i2c_bam_cb; + txd->callback_param = qup; + } +@@ -807,9 +801,9 @@ static int qup_i2c_bam_do_xfer(struct qu + + dma_async_issue_pending(qup->btx.dma); + +- if (rx_nents) { ++ if (rx_cnt) { + rxd = dmaengine_prep_slave_sg(qup->brx.dma, qup->brx.sg, +- rx_nents, DMA_DEV_TO_MEM, ++ rx_cnt, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!rxd) { + dev_err(qup->dev, "failed to get rx desc\n"); +@@ -844,7 +838,7 @@ static int qup_i2c_bam_do_xfer(struct qu + goto desc_err; + } + +- if (rx_nents) ++ if (rx_cnt) + writel(QUP_BAM_INPUT_EOT, + qup->base + QUP_OUT_FIFO_BASE); + +@@ -862,10 +856,10 @@ static int qup_i2c_bam_do_xfer(struct qu + } + + desc_err: +- dma_unmap_sg(qup->dev, qup->btx.sg, tx_nents, DMA_TO_DEVICE); ++ dma_unmap_sg(qup->dev, qup->btx.sg, tx_cnt, DMA_TO_DEVICE); + +- if (rx_nents) +- dma_unmap_sg(qup->dev, qup->brx.sg, rx_nents, ++ if (rx_cnt) ++ dma_unmap_sg(qup->dev, qup->brx.sg, rx_cnt, + DMA_FROM_DEVICE); + + return ret; diff --git a/target/linux/ipq40xx/patches-4.14/088-0005-i2c-qup-schedule-EOT-and-FLUSH-tags-at-the-end-of-tr.patch b/target/linux/ipq40xx/patches-4.14/088-0005-i2c-qup-schedule-EOT-and-FLUSH-tags-at-the-end-of-tr.patch new file mode 100644 index 0000000000..cbede84cfe --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0005-i2c-qup-schedule-EOT-and-FLUSH-tags-at-the-end-of-tr.patch @@ -0,0 +1,126 @@ +From c5adc0fa63a930e3313c74bb7c1d4d158130eb41 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:54 +0530 +Subject: [PATCH 05/13] i2c: qup: schedule EOT and FLUSH tags at the end of + transfer + +The role of FLUSH and EOT tag is to flush already scheduled +descriptors in BAM HW in case of error. EOT is required only +when descriptors are scheduled in RX FIFO. If all the messages +are WRITE, then only FLUSH tag will be used. + +A single BAM transfer can have multiple read and write messages. +The EOT and FLUSH tags should be scheduled at the end of BAM HW +descriptors. Since the READ and WRITE can be present in any order +so for some of the cases, these tags are not being written +correctly. + +Following is one of the example + + READ, READ, READ, READ + +Currently EOT and FLUSH tags are being written after each READ. 
+If QUP gets NACK for first READ itself, then flush will be +triggered. It will look for first FLUSH tag in TX FIFO and will +stop there so only descriptors for first READ descriptors be +flushed. All the scheduled descriptors should be cleared to +generate BAM DMA completion. + +Now this patch is scheduling FLUSH and EOT only once after all the +descriptors. So, flush will clear all the scheduled descriptors and +BAM will generate the completion interrupt. + +Signed-off-by: Abhishek Sahu +Reviewed-by: Sricharan R +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 39 ++++++++++++++++++++++-------------- + 1 file changed, 24 insertions(+), 15 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -551,7 +551,7 @@ static int qup_i2c_set_tags_smb(u16 addr + } + + static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup, +- struct i2c_msg *msg, int is_dma) ++ struct i2c_msg *msg) + { + u16 addr = i2c_8bit_addr_from_msg(msg); + int len = 0; +@@ -592,11 +592,6 @@ static int qup_i2c_set_tags(u8 *tags, st + else + tags[len++] = data_len; + +- if ((msg->flags & I2C_M_RD) && last && is_dma) { +- tags[len++] = QUP_BAM_INPUT_EOT; +- tags[len++] = QUP_BAM_FLUSH_STOP; +- } +- + return len; + } + +@@ -605,7 +600,7 @@ static int qup_i2c_issue_xfer_v2(struct + int data_len = 0, tag_len, index; + int ret; + +- tag_len = qup_i2c_set_tags(qup->blk.tags, qup, msg, 0); ++ tag_len = qup_i2c_set_tags(qup->blk.tags, qup, msg); + index = msg->len - qup->blk.data_len; + + /* only tags are written for read */ +@@ -701,7 +696,7 @@ static int qup_i2c_bam_do_xfer(struct qu + while (qup->blk.pos < blocks) { + tlen = (i == (blocks - 1)) ? rem : limit; + tags = &qup->start_tag.start[off + len]; +- len += qup_i2c_set_tags(tags, qup, msg, 1); ++ len += qup_i2c_set_tags(tags, qup, msg); + qup->blk.data_len -= tlen; + + /* scratch buf to read the start and len tags */ +@@ -729,17 +724,11 @@ static int qup_i2c_bam_do_xfer(struct qu + return ret; + + off += len; +- /* scratch buf to read the BAM EOT and FLUSH tags */ +- ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++], +- &qup->brx.tag.start[0], +- 2, qup, DMA_FROM_DEVICE); +- if (ret) +- return ret; + } else { + while (qup->blk.pos < blocks) { + tlen = (i == (blocks - 1)) ? 
rem : limit; + tags = &qup->start_tag.start[off + tx_len]; +- len = qup_i2c_set_tags(tags, qup, msg, 1); ++ len = qup_i2c_set_tags(tags, qup, msg); + qup->blk.data_len -= tlen; + + ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], +@@ -779,6 +768,26 @@ static int qup_i2c_bam_do_xfer(struct qu + msg++; + } + ++ /* schedule the EOT and FLUSH I2C tags */ ++ len = 1; ++ if (rx_cnt) { ++ qup->btx.tag.start[0] = QUP_BAM_INPUT_EOT; ++ len++; ++ ++ /* scratch buf to read the BAM EOT and FLUSH tags */ ++ ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++], ++ &qup->brx.tag.start[0], ++ 2, qup, DMA_FROM_DEVICE); ++ if (ret) ++ return ret; ++ } ++ ++ qup->btx.tag.start[len - 1] = QUP_BAM_FLUSH_STOP; ++ ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], &qup->btx.tag.start[0], ++ len, qup, DMA_TO_DEVICE); ++ if (ret) ++ return ret; ++ + txd = dmaengine_prep_slave_sg(qup->btx.dma, qup->btx.sg, tx_cnt, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_PREP_FENCE); diff --git a/target/linux/ipq40xx/patches-4.14/088-0006-i2c-qup-fix-the-transfer-length-for-BAM-RX-EOT-FLUSH.patch b/target/linux/ipq40xx/patches-4.14/088-0006-i2c-qup-fix-the-transfer-length-for-BAM-RX-EOT-FLUSH.patch new file mode 100644 index 0000000000..6eb2d6a7b1 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0006-i2c-qup-fix-the-transfer-length-for-BAM-RX-EOT-FLUSH.patch @@ -0,0 +1,33 @@ +From 7e6c35fe602df6821b3e7db5b1ba656162750fda Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:55 +0530 +Subject: [PATCH 06/13] i2c: qup: fix the transfer length for BAM RX EOT FLUSH + tags + +In case of FLUSH operation, BAM copies INPUT EOT FLUSH (0x94) +instead of normal EOT (0x93) tag in input data stream when an +input EOT tag is received during flush operation. So only one tag +will be written instead of 2 separate tags. + +Signed-off-by: Abhishek Sahu +Reviewed-by: Andy Gross +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -774,10 +774,10 @@ static int qup_i2c_bam_do_xfer(struct qu + qup->btx.tag.start[0] = QUP_BAM_INPUT_EOT; + len++; + +- /* scratch buf to read the BAM EOT and FLUSH tags */ ++ /* scratch buf to read the BAM EOT FLUSH tags */ + ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++], + &qup->brx.tag.start[0], +- 2, qup, DMA_FROM_DEVICE); ++ 1, qup, DMA_FROM_DEVICE); + if (ret) + return ret; + } diff --git a/target/linux/ipq40xx/patches-4.14/088-0007-i2c-qup-proper-error-handling-for-i2c-error-in-BAM-m.patch b/target/linux/ipq40xx/patches-4.14/088-0007-i2c-qup-proper-error-handling-for-i2c-error-in-BAM-m.patch new file mode 100644 index 0000000000..a86f144caf --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0007-i2c-qup-proper-error-handling-for-i2c-error-in-BAM-m.patch @@ -0,0 +1,90 @@ +From 3f450d3eea14799b14192231840c1753a660f150 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:56 +0530 +Subject: [PATCH 07/13] i2c: qup: proper error handling for i2c error in BAM + mode + +Currently the i2c error handling in BAM mode is not working +properly in stress condition. + +1. After an error, the FIFO are being written with FLUSH and + EOT tags which should not be required since already these tags + have been written in BAM descriptor itself. + +2. QUP state is being moved to RESET in IRQ handler in case + of error. When QUP HW encounters an error in BAM mode then it + moves the QUP STATE to PAUSE state. 
In this case, I2C_FLUSH + command needs to be executed while moving to RUN_STATE by writing + to the QUP_STATE register with the I2C_FLUSH bit set to 1. + +3. In Error case, sometimes, QUP generates more than one + interrupt which will trigger the complete again. After an error, + the flush operation will be scheduled after doing + reinit_completion which should be triggered by BAM IRQ callback. + If the second QUP IRQ comes during this time then it will call + the complete and the transfer function will assume the all the + BAM HW descriptors have been completed. + +4. The release DMA is being called after each error which + will free the DMA tx and rx channels. The error like NACK is very + common in I2C transfer and every time this will be overhead. Now, + since the error handling is proper so this release channel can be + completely avoided. + +Signed-off-by: Abhishek Sahu +Reviewed-by: Sricharan R +Reviewed-by: Austin Christ +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 25 ++++++++++++++++--------- + 1 file changed, 16 insertions(+), 9 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -219,9 +219,24 @@ static irqreturn_t qup_i2c_interrupt(int + if (bus_err) + writel(bus_err, qup->base + QUP_I2C_STATUS); + ++ /* ++ * Check for BAM mode and returns if already error has come for current ++ * transfer. In Error case, sometimes, QUP generates more than one ++ * interrupt. ++ */ ++ if (qup->use_dma && (qup->qup_err || qup->bus_err)) ++ return IRQ_HANDLED; ++ + /* Reset the QUP State in case of error */ + if (qup_err || bus_err) { +- writel(QUP_RESET_STATE, qup->base + QUP_STATE); ++ /* ++ * Don’t reset the QUP state in case of BAM mode. The BAM ++ * flush operation needs to be scheduled in transfer function ++ * which will clear the remaining schedule descriptors in BAM ++ * HW FIFO and generates the BAM interrupt. ++ */ ++ if (!qup->use_dma) ++ writel(QUP_RESET_STATE, qup->base + QUP_STATE); + goto done; + } + +@@ -847,20 +862,12 @@ static int qup_i2c_bam_do_xfer(struct qu + goto desc_err; + } + +- if (rx_cnt) +- writel(QUP_BAM_INPUT_EOT, +- qup->base + QUP_OUT_FIFO_BASE); +- +- writel(QUP_BAM_FLUSH_STOP, qup->base + QUP_OUT_FIFO_BASE); +- + qup_i2c_flush(qup); + + /* wait for remaining interrupts to occur */ + if (!wait_for_completion_timeout(&qup->xfer, HZ)) + dev_err(qup->dev, "flush timed out\n"); + +- qup_i2c_rel_dma(qup); +- + ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO; + } + diff --git a/target/linux/ipq40xx/patches-4.14/088-0008-i2c-qup-use-the-complete-transfer-length-to-choose-D.patch b/target/linux/ipq40xx/patches-4.14/088-0008-i2c-qup-use-the-complete-transfer-length-to-choose-D.patch new file mode 100644 index 0000000000..3d68695588 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0008-i2c-qup-use-the-complete-transfer-length-to-choose-D.patch @@ -0,0 +1,54 @@ +From 08f15963bc751bc818294c0f75a9aaca299c4052 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:57 +0530 +Subject: [PATCH 08/13] i2c: qup: use the complete transfer length to choose + DMA mode + +Currently each message length in complete transfer is being +checked for determining DMA mode and if any of the message length +is less than FIFO length then non DMA mode is being used which +will increase overhead. DMA can be used for any length and it +should be determined with complete transfer length. Now, this +patch selects DMA mode if the total length is greater than FIFO +length. 
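
The selection logic boils down to a single length check over the whole transfer. A schematic helper (illustrative only; the function name is hypothetical, but it mirrors the check the hunk below adds to qup_i2c_xfer_v2()):

  #include <linux/i2c.h>
  #include <linux/mm.h>

  /* Hypothetical helper mirroring the new total-length DMA check. */
  static bool qup_xfer_wants_dma(struct i2c_msg *msgs, int num,
                                 unsigned int out_fifo_sz,
                                 unsigned int in_fifo_sz)
  {
          unsigned int total_len = 0;
          int idx;

          for (idx = 0; idx < num; idx++) {
                  /* vmalloc'ed buffers cannot be DMA mapped, fall back to PIO */
                  if (is_vmalloc_addr(msgs[idx].buf))
                          return false;

                  total_len += msgs[idx].len;
          }

          return total_len > out_fifo_sz || total_len > in_fifo_sz;
  }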
+ +Signed-off-by: Abhishek Sahu +Reviewed-by: Austin Christ +Reviewed-by: Andy Gross +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 13 +++++++------ + 1 file changed, 7 insertions(+), 6 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -1300,7 +1300,8 @@ static int qup_i2c_xfer_v2(struct i2c_ad + int num) + { + struct qup_i2c_dev *qup = i2c_get_adapdata(adap); +- int ret, len, idx = 0; ++ int ret, idx = 0; ++ unsigned int total_len = 0; + + qup->bus_err = 0; + qup->qup_err = 0; +@@ -1326,14 +1327,14 @@ static int qup_i2c_xfer_v2(struct i2c_ad + goto out; + } + +- len = (msgs[idx].len > qup->out_fifo_sz) || +- (msgs[idx].len > qup->in_fifo_sz); +- +- if (is_vmalloc_addr(msgs[idx].buf) || !len) ++ if (is_vmalloc_addr(msgs[idx].buf)) + break; ++ ++ total_len += msgs[idx].len; + } + +- if (idx == num) ++ if (idx == num && (total_len > qup->out_fifo_sz || ++ total_len > qup->in_fifo_sz)) + qup->use_dma = true; + } + diff --git a/target/linux/ipq40xx/patches-4.14/088-0009-i2c-qup-change-completion-timeout-according-to-trans.patch b/target/linux/ipq40xx/patches-4.14/088-0009-i2c-qup-change-completion-timeout-according-to-trans.patch new file mode 100644 index 0000000000..c95d20ec97 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0009-i2c-qup-change-completion-timeout-according-to-trans.patch @@ -0,0 +1,61 @@ +From ecb6e1e5f4352055a5761b945a833a925d51bf8d Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:58 +0530 +Subject: [PATCH 09/13] i2c: qup: change completion timeout according to + transfer length + +Currently the completion timeout is being taken according to +maximum transfer length which is too high if SCL is operating in +high frequency. This patch calculates timeout on the basis of +one-byte transfer time and uses the same for completion timeout. + +Signed-off-by: Abhishek Sahu +Reviewed-by: Andy Gross +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 13 ++++++++++--- + 1 file changed, 10 insertions(+), 3 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -121,8 +121,12 @@ + #define MX_TX_RX_LEN SZ_64K + #define MX_BLOCKS (MX_TX_RX_LEN / QUP_READ_LIMIT) + +-/* Max timeout in ms for 32k bytes */ +-#define TOUT_MAX 300 ++/* ++ * Minimum transfer timeout for i2c transfers in seconds. It will be added on ++ * the top of maximum transfer time calculated from i2c bus speed to compensate ++ * the overheads. ++ */ ++#define TOUT_MIN 2 + + /* Default values. 
Use these if FW query fails */ + #define DEFAULT_CLK_FREQ 100000 +@@ -163,6 +167,7 @@ struct qup_i2c_dev { + int in_blk_sz; + + unsigned long one_byte_t; ++ unsigned long xfer_timeout; + struct qup_i2c_block blk; + + struct i2c_msg *msg; +@@ -849,7 +854,7 @@ static int qup_i2c_bam_do_xfer(struct qu + dma_async_issue_pending(qup->brx.dma); + } + +- if (!wait_for_completion_timeout(&qup->xfer, TOUT_MAX * HZ)) { ++ if (!wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout)) { + dev_err(qup->dev, "normal trans timed out\n"); + ret = -ETIMEDOUT; + } +@@ -1605,6 +1610,8 @@ nodma: + */ + one_bit_t = (USEC_PER_SEC / clk_freq) + 1; + qup->one_byte_t = one_bit_t * 9; ++ qup->xfer_timeout = TOUT_MIN * HZ + ++ usecs_to_jiffies(MX_TX_RX_LEN * qup->one_byte_t); + + dev_dbg(qup->dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n", + qup->in_blk_sz, qup->in_fifo_sz, diff --git a/target/linux/ipq40xx/patches-4.14/088-0010-i2c-qup-fix-buffer-overflow-for-multiple-msg-of-maxi.patch b/target/linux/ipq40xx/patches-4.14/088-0010-i2c-qup-fix-buffer-overflow-for-multiple-msg-of-maxi.patch new file mode 100644 index 0000000000..e5d1edfb72 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0010-i2c-qup-fix-buffer-overflow-for-multiple-msg-of-maxi.patch @@ -0,0 +1,311 @@ +From 6f2f0f6465acbd59391c43352ff0df77df1f01db Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:44:59 +0530 +Subject: [PATCH 10/13] i2c: qup: fix buffer overflow for multiple msg of + maximum xfer len +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The BAM mode requires buffer for start tag data and tx, rx SG +list. Currently, this is being taken for maximum transfer length +(65K). But an I2C transfer can have multiple messages and each +message can be of this maximum length so the buffer overflow will +happen in this case. Since increasing buffer length won’t be +feasible since an I2C transfer can contain any number of messages +so this patch does following changes to make i2c transfers working +for multiple messages case. + +1. Calculate the required buffers for 2 maximum length messages + (65K * 2). +2. Split the descriptor formation and descriptor scheduling. + The idea is to fit as many messages in one DMA transfers for 65K + threshold value (max_xfer_sg_len). Whenever the sg_cnt is + crossing this, then schedule the BAM transfer and subsequent + transfer will again start from zero. + +Signed-off-by: Abhishek Sahu +Reviewed-by: Andy Gross +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 194 ++++++++++++++++++++--------------- + 1 file changed, 110 insertions(+), 84 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -118,8 +118,12 @@ + #define ONE_BYTE 0x1 + #define QUP_I2C_MX_CONFIG_DURING_RUN BIT(31) + ++/* Maximum transfer length for single DMA descriptor */ + #define MX_TX_RX_LEN SZ_64K + #define MX_BLOCKS (MX_TX_RX_LEN / QUP_READ_LIMIT) ++/* Maximum transfer length for all DMA descriptors */ ++#define MX_DMA_TX_RX_LEN (2 * MX_TX_RX_LEN) ++#define MX_DMA_BLOCKS (MX_DMA_TX_RX_LEN / QUP_READ_LIMIT) + + /* + * Minimum transfer timeout for i2c transfers in seconds. 
It will be added on +@@ -150,6 +154,7 @@ struct qup_i2c_bam { + struct qup_i2c_tag tag; + struct dma_chan *dma; + struct scatterlist *sg; ++ unsigned int sg_cnt; + }; + + struct qup_i2c_dev { +@@ -188,6 +193,8 @@ struct qup_i2c_dev { + bool is_dma; + /* To check if the current transfer is using DMA */ + bool use_dma; ++ unsigned int max_xfer_sg_len; ++ unsigned int tag_buf_pos; + struct dma_pool *dpool; + struct qup_i2c_tag start_tag; + struct qup_i2c_bam brx; +@@ -692,102 +699,87 @@ static int qup_i2c_req_dma(struct qup_i2 + return 0; + } + +-static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg, +- int num) ++static int qup_i2c_bam_make_desc(struct qup_i2c_dev *qup, struct i2c_msg *msg) + { +- struct dma_async_tx_descriptor *txd, *rxd = NULL; +- int ret = 0, idx = 0, limit = QUP_READ_LIMIT; +- dma_cookie_t cookie_rx, cookie_tx; +- u32 len, blocks, rem; +- u32 i, tlen, tx_len, tx_cnt = 0, rx_cnt = 0, off = 0; ++ int ret = 0, limit = QUP_READ_LIMIT; ++ u32 len = 0, blocks, rem; ++ u32 i = 0, tlen, tx_len = 0; + u8 *tags; + +- while (idx < num) { +- tx_len = 0, len = 0, i = 0; +- +- qup->is_last = (idx == (num - 1)); ++ qup_i2c_set_blk_data(qup, msg); + +- qup_i2c_set_blk_data(qup, msg); ++ blocks = qup->blk.count; ++ rem = msg->len - (blocks - 1) * limit; + +- blocks = qup->blk.count; +- rem = msg->len - (blocks - 1) * limit; ++ if (msg->flags & I2C_M_RD) { ++ while (qup->blk.pos < blocks) { ++ tlen = (i == (blocks - 1)) ? rem : limit; ++ tags = &qup->start_tag.start[qup->tag_buf_pos + len]; ++ len += qup_i2c_set_tags(tags, qup, msg); ++ qup->blk.data_len -= tlen; ++ ++ /* scratch buf to read the start and len tags */ ++ ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++], ++ &qup->brx.tag.start[0], ++ 2, qup, DMA_FROM_DEVICE); + +- if (msg->flags & I2C_M_RD) { +- while (qup->blk.pos < blocks) { +- tlen = (i == (blocks - 1)) ? rem : limit; +- tags = &qup->start_tag.start[off + len]; +- len += qup_i2c_set_tags(tags, qup, msg); +- qup->blk.data_len -= tlen; +- +- /* scratch buf to read the start and len tags */ +- ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++], +- &qup->brx.tag.start[0], +- 2, qup, DMA_FROM_DEVICE); +- +- if (ret) +- return ret; +- +- ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++], +- &msg->buf[limit * i], +- tlen, qup, +- DMA_FROM_DEVICE); +- if (ret) +- return ret; ++ if (ret) ++ return ret; + +- i++; +- qup->blk.pos = i; +- } +- ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], +- &qup->start_tag.start[off], +- len, qup, DMA_TO_DEVICE); ++ ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++], ++ &msg->buf[limit * i], ++ tlen, qup, ++ DMA_FROM_DEVICE); + if (ret) + return ret; + +- off += len; +- } else { +- while (qup->blk.pos < blocks) { +- tlen = (i == (blocks - 1)) ? 
rem : limit; +- tags = &qup->start_tag.start[off + tx_len]; +- len = qup_i2c_set_tags(tags, qup, msg); +- qup->blk.data_len -= tlen; +- +- ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], +- tags, len, +- qup, DMA_TO_DEVICE); +- if (ret) +- return ret; +- +- tx_len += len; +- ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], +- &msg->buf[limit * i], +- tlen, qup, DMA_TO_DEVICE); +- if (ret) +- return ret; +- i++; +- qup->blk.pos = i; +- } +- off += tx_len; ++ i++; ++ qup->blk.pos = i; ++ } ++ ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++], ++ &qup->start_tag.start[qup->tag_buf_pos], ++ len, qup, DMA_TO_DEVICE); ++ if (ret) ++ return ret; + +- if (idx == (num - 1)) { +- len = 1; +- if (rx_cnt) { +- qup->btx.tag.start[0] = +- QUP_BAM_INPUT_EOT; +- len++; +- } +- qup->btx.tag.start[len - 1] = +- QUP_BAM_FLUSH_STOP; +- ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], +- &qup->btx.tag.start[0], +- len, qup, DMA_TO_DEVICE); +- if (ret) +- return ret; +- } ++ qup->tag_buf_pos += len; ++ } else { ++ while (qup->blk.pos < blocks) { ++ tlen = (i == (blocks - 1)) ? rem : limit; ++ tags = &qup->start_tag.start[qup->tag_buf_pos + tx_len]; ++ len = qup_i2c_set_tags(tags, qup, msg); ++ qup->blk.data_len -= tlen; ++ ++ ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++], ++ tags, len, ++ qup, DMA_TO_DEVICE); ++ if (ret) ++ return ret; ++ ++ tx_len += len; ++ ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++], ++ &msg->buf[limit * i], ++ tlen, qup, DMA_TO_DEVICE); ++ if (ret) ++ return ret; ++ i++; ++ qup->blk.pos = i; + } +- idx++; +- msg++; ++ ++ qup->tag_buf_pos += tx_len; + } + ++ return 0; ++} ++ ++static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup) ++{ ++ struct dma_async_tx_descriptor *txd, *rxd = NULL; ++ int ret = 0; ++ dma_cookie_t cookie_rx, cookie_tx; ++ u32 len = 0; ++ u32 tx_cnt = qup->btx.sg_cnt, rx_cnt = qup->brx.sg_cnt; ++ + /* schedule the EOT and FLUSH I2C tags */ + len = 1; + if (rx_cnt) { +@@ -886,11 +878,19 @@ desc_err: + return ret; + } + ++static void qup_i2c_bam_clear_tag_buffers(struct qup_i2c_dev *qup) ++{ ++ qup->btx.sg_cnt = 0; ++ qup->brx.sg_cnt = 0; ++ qup->tag_buf_pos = 0; ++} ++ + static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, + int num) + { + struct qup_i2c_dev *qup = i2c_get_adapdata(adap); + int ret = 0; ++ int idx = 0; + + enable_irq(qup->irq); + ret = qup_i2c_req_dma(qup); +@@ -913,9 +913,34 @@ static int qup_i2c_bam_xfer(struct i2c_a + goto out; + + writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL); ++ qup_i2c_bam_clear_tag_buffers(qup); ++ ++ for (idx = 0; idx < num; idx++) { ++ qup->msg = msg + idx; ++ qup->is_last = idx == (num - 1); ++ ++ ret = qup_i2c_bam_make_desc(qup, qup->msg); ++ if (ret) ++ break; ++ ++ /* ++ * Make DMA descriptor and schedule the BAM transfer if its ++ * already crossed the maximum length. Since the memory for all ++ * tags buffers have been taken for 2 maximum possible ++ * transfers length so it will never cross the buffer actual ++ * length. 
++ */ ++ if (qup->btx.sg_cnt > qup->max_xfer_sg_len || ++ qup->brx.sg_cnt > qup->max_xfer_sg_len || ++ qup->is_last) { ++ ret = qup_i2c_bam_schedule_desc(qup); ++ if (ret) ++ break; ++ ++ qup_i2c_bam_clear_tag_buffers(qup); ++ } ++ } + +- qup->msg = msg; +- ret = qup_i2c_bam_do_xfer(qup, qup->msg, num); + out: + disable_irq(qup->irq); + +@@ -1468,7 +1493,8 @@ static int qup_i2c_probe(struct platform + else if (ret != 0) + goto nodma; + +- blocks = (MX_BLOCKS << 1) + 1; ++ qup->max_xfer_sg_len = (MX_BLOCKS << 1); ++ blocks = (MX_DMA_BLOCKS << 1) + 1; + qup->btx.sg = devm_kzalloc(&pdev->dev, + sizeof(*qup->btx.sg) * blocks, + GFP_KERNEL); +@@ -1611,7 +1637,7 @@ nodma: + one_bit_t = (USEC_PER_SEC / clk_freq) + 1; + qup->one_byte_t = one_bit_t * 9; + qup->xfer_timeout = TOUT_MIN * HZ + +- usecs_to_jiffies(MX_TX_RX_LEN * qup->one_byte_t); ++ usecs_to_jiffies(MX_DMA_TX_RX_LEN * qup->one_byte_t); + + dev_dbg(qup->dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n", + qup->in_blk_sz, qup->in_fifo_sz, diff --git a/target/linux/ipq40xx/patches-4.14/088-0011-i2c-qup-send-NACK-for-last-read-sub-transfers.patch b/target/linux/ipq40xx/patches-4.14/088-0011-i2c-qup-send-NACK-for-last-read-sub-transfers.patch new file mode 100644 index 0000000000..82973829f1 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0011-i2c-qup-send-NACK-for-last-read-sub-transfers.patch @@ -0,0 +1,43 @@ +From f7714b4e451bdcb7918b9aad14af22684ceac638 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:45:00 +0530 +Subject: [PATCH 11/13] i2c: qup: send NACK for last read sub transfers +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +According to I2c specification, “If a master-receiver sends a +repeated START condition, it sends a not-acknowledge (A) just +before the repeated START condition”. QUP v2 supports sending +of NACK without stop with QUP_TAG_V2_DATARD_NACK so added the +same. + +Signed-off-by: Abhishek Sahu +Reviewed-by: Austin Christ +Reviewed-by: Andy Gross +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -104,6 +104,7 @@ + #define QUP_TAG_V2_DATAWR 0x82 + #define QUP_TAG_V2_DATAWR_STOP 0x83 + #define QUP_TAG_V2_DATARD 0x85 ++#define QUP_TAG_V2_DATARD_NACK 0x86 + #define QUP_TAG_V2_DATARD_STOP 0x87 + + /* Status, Error flags */ +@@ -606,7 +607,9 @@ static int qup_i2c_set_tags(u8 *tags, st + tags[len++] = QUP_TAG_V2_DATAWR_STOP; + } else { + if (msg->flags & I2C_M_RD) +- tags[len++] = QUP_TAG_V2_DATARD; ++ tags[len++] = qup->blk.pos == (qup->blk.count - 1) ? 
++ QUP_TAG_V2_DATARD_NACK : ++ QUP_TAG_V2_DATARD; + else + tags[len++] = QUP_TAG_V2_DATAWR; + } diff --git a/target/linux/ipq40xx/patches-4.14/088-0012-i2c-qup-reorganization-of-driver-code-to-remove-poll.patch b/target/linux/ipq40xx/patches-4.14/088-0012-i2c-qup-reorganization-of-driver-code-to-remove-poll.patch new file mode 100644 index 0000000000..1690415265 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0012-i2c-qup-reorganization-of-driver-code-to-remove-poll.patch @@ -0,0 +1,579 @@ +From fbfab1ab065879370541caf0e514987368eb41b2 Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:45:01 +0530 +Subject: [PATCH 12/13] i2c: qup: reorganization of driver code to remove + polling for qup v1 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Following are the major issues in current driver code + +1. The current driver simply assumes the transfer completion + whenever its gets any non-error interrupts and then simply do the + polling of available/free bytes in FIFO. +2. The block mode is not working properly since no handling in + being done for OUT_BLOCK_WRITE_REQ and IN_BLOCK_READ_REQ. + +Because of above, i2c v1 transfers of size greater than 32 are failing +with following error message + + i2c_qup 78b6000.i2c: timeout for fifo out full + +To make block mode working properly and move to use the interrupts +instead of polling, major code reorganization is required. Following +are the major changes done in this patch + +1. Remove the polling of TX FIFO free space and RX FIFO available + bytes and move to interrupts completely. QUP has QUP_MX_OUTPUT_DONE, + QUP_MX_INPUT_DONE, OUT_BLOCK_WRITE_REQ and IN_BLOCK_READ_REQ + interrupts to handle FIFO’s properly so check all these interrupts. +2. During write, For FIFO mode, TX FIFO can be directly written + without checking for FIFO space. For block mode, the QUP will generate + OUT_BLOCK_WRITE_REQ interrupt whenever it has block size of available + space. +3. During read, both TX and RX FIFO will be used. TX will be used + for writing tags and RX will be used for receiving the data. In QUP, + TX and RX can operate in separate mode so configure modes accordingly. +4. For read FIFO mode, wait for QUP_MX_INPUT_DONE interrupt which + will be generated after all the bytes have been copied in RX FIFO. For + read Block mode, QUP will generate IN_BLOCK_READ_REQ interrupts + whenever it has block size of available data. 
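
The resulting interrupt-driven flow described in points 1-4 above can be summarised by the following schematic skeleton (illustrative only; qup_ctx, refill_tx_fifo(), drain_rx_fifo() and transfer_finished() are placeholders, not symbols from the driver):

  static irqreturn_t qup_v1_irq_sketch(int irq, void *dev)
  {
          struct qup_ctx *qup = dev;      /* placeholder context type */
          u32 opflags = readl(qup->base + QUP_OPERATIONAL);

          if (opflags & QUP_OUT_SVC_FLAG) {
                  writel(QUP_OUT_SVC_FLAG, qup->base + QUP_OPERATIONAL);
                  /* QUP has room for one more block of tags/data */
                  if (opflags & OUT_BLOCK_WRITE_REQ)
                          refill_tx_fifo(qup);
          }

          if (opflags & QUP_IN_SVC_FLAG) {
                  writel(QUP_IN_SVC_FLAG, qup->base + QUP_OPERATIONAL);
                  /* one more block of received data is available */
                  if (opflags & IN_BLOCK_READ_REQ)
                          drain_rx_fifo(qup);
          }

          /* only signal the waiter once all bytes have really moved */
          if (transfer_finished(qup))
                  complete(&qup->xfer);

          return IRQ_HANDLED;
  }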
+ +Signed-off-by: Abhishek Sahu +Reviewed-by: Sricharan R +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 366 +++++++++++++++++++++-------------- + 1 file changed, 223 insertions(+), 143 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -64,8 +64,11 @@ + #define QUP_IN_SVC_FLAG BIT(9) + #define QUP_MX_OUTPUT_DONE BIT(10) + #define QUP_MX_INPUT_DONE BIT(11) ++#define OUT_BLOCK_WRITE_REQ BIT(12) ++#define IN_BLOCK_READ_REQ BIT(13) + + /* I2C mini core related values */ ++#define QUP_NO_INPUT BIT(7) + #define QUP_CLOCK_AUTO_GATE BIT(13) + #define I2C_MINI_CORE (2 << 8) + #define I2C_N_VAL 15 +@@ -137,13 +140,36 @@ + #define DEFAULT_CLK_FREQ 100000 + #define DEFAULT_SRC_CLK 20000000 + ++/* ++ * count: no of blocks ++ * pos: current block number ++ * tx_tag_len: tx tag length for current block ++ * rx_tag_len: rx tag length for current block ++ * data_len: remaining data length for current message ++ * total_tx_len: total tx length including tag bytes for current QUP transfer ++ * total_rx_len: total rx length including tag bytes for current QUP transfer ++ * tx_fifo_free: number of free bytes in current QUP block write. ++ * fifo_available: number of available bytes in RX FIFO for current ++ * QUP block read ++ * rx_bytes_read: if all the bytes have been read from rx FIFO. ++ * is_tx_blk_mode: whether tx uses block or FIFO mode in case of non BAM xfer. ++ * is_rx_blk_mode: whether rx uses block or FIFO mode in case of non BAM xfer. ++ * tags: contains tx tag bytes for current QUP transfer ++ */ + struct qup_i2c_block { +- int count; +- int pos; +- int tx_tag_len; +- int rx_tag_len; +- int data_len; +- u8 tags[6]; ++ int count; ++ int pos; ++ int tx_tag_len; ++ int rx_tag_len; ++ int data_len; ++ int total_tx_len; ++ int total_rx_len; ++ int tx_fifo_free; ++ int fifo_available; ++ bool rx_bytes_read; ++ bool is_tx_blk_mode; ++ bool is_rx_blk_mode; ++ u8 tags[6]; + }; + + struct qup_i2c_tag { +@@ -186,6 +212,7 @@ struct qup_i2c_dev { + + /* To check if this is the last msg */ + bool is_last; ++ bool is_qup_v1; + + /* To configure when bus is in run state */ + int config_run; +@@ -202,11 +229,18 @@ struct qup_i2c_dev { + struct qup_i2c_bam btx; + + struct completion xfer; ++ /* function to write data in tx fifo */ ++ void (*write_tx_fifo)(struct qup_i2c_dev *qup); ++ /* function to read data from rx fifo */ ++ void (*read_rx_fifo)(struct qup_i2c_dev *qup); ++ /* function to write tags in tx fifo for i2c read transfer */ ++ void (*write_rx_tags)(struct qup_i2c_dev *qup); + }; + + static irqreturn_t qup_i2c_interrupt(int irq, void *dev) + { + struct qup_i2c_dev *qup = dev; ++ struct qup_i2c_block *blk = &qup->blk; + u32 bus_err; + u32 qup_err; + u32 opflags; +@@ -253,12 +287,48 @@ static irqreturn_t qup_i2c_interrupt(int + goto done; + } + +- if (opflags & QUP_IN_SVC_FLAG) +- writel(QUP_IN_SVC_FLAG, qup->base + QUP_OPERATIONAL); ++ if (!qup->is_qup_v1) ++ goto done; + +- if (opflags & QUP_OUT_SVC_FLAG) ++ if (opflags & QUP_OUT_SVC_FLAG) { + writel(QUP_OUT_SVC_FLAG, qup->base + QUP_OPERATIONAL); + ++ if (opflags & OUT_BLOCK_WRITE_REQ) { ++ blk->tx_fifo_free += qup->out_blk_sz; ++ if (qup->msg->flags & I2C_M_RD) ++ qup->write_rx_tags(qup); ++ else ++ qup->write_tx_fifo(qup); ++ } ++ } ++ ++ if (opflags & QUP_IN_SVC_FLAG) { ++ writel(QUP_IN_SVC_FLAG, qup->base + QUP_OPERATIONAL); ++ ++ if (!blk->is_rx_blk_mode) { ++ blk->fifo_available += qup->in_fifo_sz; ++ qup->read_rx_fifo(qup); ++ } else if (opflags & IN_BLOCK_READ_REQ) { ++ 
blk->fifo_available += qup->in_blk_sz; ++ qup->read_rx_fifo(qup); ++ } ++ } ++ ++ if (qup->msg->flags & I2C_M_RD) { ++ if (!blk->rx_bytes_read) ++ return IRQ_HANDLED; ++ } else { ++ /* ++ * Ideally, QUP_MAX_OUTPUT_DONE_FLAG should be checked ++ * for FIFO mode also. But, QUP_MAX_OUTPUT_DONE_FLAG lags ++ * behind QUP_OUTPUT_SERVICE_FLAG sometimes. The only reason ++ * of interrupt for write message in FIFO mode is ++ * QUP_MAX_OUTPUT_DONE_FLAG condition. ++ */ ++ if (blk->is_tx_blk_mode && !(opflags & QUP_MX_OUTPUT_DONE)) ++ return IRQ_HANDLED; ++ } ++ + done: + qup->qup_err = qup_err; + qup->bus_err = bus_err; +@@ -324,6 +394,28 @@ static int qup_i2c_change_state(struct q + return 0; + } + ++/* Check if I2C bus returns to IDLE state */ ++static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len) ++{ ++ unsigned long timeout; ++ u32 status; ++ int ret = 0; ++ ++ timeout = jiffies + len * 4; ++ for (;;) { ++ status = readl(qup->base + QUP_I2C_STATUS); ++ if (!(status & I2C_STATUS_BUS_ACTIVE)) ++ break; ++ ++ if (time_after(jiffies, timeout)) ++ ret = -ETIMEDOUT; ++ ++ usleep_range(len, len * 2); ++ } ++ ++ return ret; ++} ++ + /** + * qup_i2c_wait_ready - wait for a give number of bytes in tx/rx path + * @qup: The qup_i2c_dev device +@@ -394,23 +486,6 @@ static void qup_i2c_set_write_mode_v2(st + } + } + +-static void qup_i2c_set_write_mode(struct qup_i2c_dev *qup, struct i2c_msg *msg) +-{ +- /* Number of entries to shift out, including the start */ +- int total = msg->len + 1; +- +- if (total < qup->out_fifo_sz) { +- /* FIFO mode */ +- writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE); +- writel(total, qup->base + QUP_MX_WRITE_CNT); +- } else { +- /* BLOCK mode (transfer data on chunks) */ +- writel(QUP_OUTPUT_BLK_MODE | QUP_REPACK_EN, +- qup->base + QUP_IO_MODE); +- writel(total, qup->base + QUP_MX_OUTPUT_CNT); +- } +-} +- + static int check_for_fifo_space(struct qup_i2c_dev *qup) + { + int ret; +@@ -443,28 +518,25 @@ out: + return ret; + } + +-static int qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg) ++static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup) + { ++ struct qup_i2c_block *blk = &qup->blk; ++ struct i2c_msg *msg = qup->msg; + u32 addr = msg->addr << 1; + u32 qup_tag; + int idx; + u32 val; +- int ret = 0; + + if (qup->pos == 0) { + val = QUP_TAG_START | addr; + idx = 1; ++ blk->tx_fifo_free--; + } else { + val = 0; + idx = 0; + } + +- while (qup->pos < msg->len) { +- /* Check that there's space in the FIFO for our pair */ +- ret = check_for_fifo_space(qup); +- if (ret) +- return ret; +- ++ while (blk->tx_fifo_free && qup->pos < msg->len) { + if (qup->pos == msg->len - 1) + qup_tag = QUP_TAG_STOP; + else +@@ -481,11 +553,8 @@ static int qup_i2c_issue_write(struct qu + + qup->pos++; + idx++; ++ blk->tx_fifo_free--; + } +- +- ret = qup_i2c_change_state(qup, QUP_RUN_STATE); +- +- return ret; + } + + static void qup_i2c_set_blk_data(struct qup_i2c_dev *qup, +@@ -1006,64 +1075,6 @@ err: + return ret; + } + +-static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg) +-{ +- int ret; +- +- qup->msg = msg; +- qup->pos = 0; +- +- enable_irq(qup->irq); +- +- qup_i2c_set_write_mode(qup, msg); +- +- ret = qup_i2c_change_state(qup, QUP_RUN_STATE); +- if (ret) +- goto err; +- +- writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL); +- +- do { +- ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); +- if (ret) +- goto err; +- +- ret = qup_i2c_issue_write(qup, msg); +- if (ret) +- goto err; +- +- ret = qup_i2c_change_state(qup, QUP_RUN_STATE); +- if (ret) 
+- goto err; +- +- ret = qup_i2c_wait_for_complete(qup, msg); +- if (ret) +- goto err; +- } while (qup->pos < msg->len); +- +- /* Wait for the outstanding data in the fifo to drain */ +- ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY, RESET_BIT, ONE_BYTE); +-err: +- disable_irq(qup->irq); +- qup->msg = NULL; +- +- return ret; +-} +- +-static void qup_i2c_set_read_mode(struct qup_i2c_dev *qup, int len) +-{ +- if (len < qup->in_fifo_sz) { +- /* FIFO mode */ +- writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE); +- writel(len, qup->base + QUP_MX_READ_CNT); +- } else { +- /* BLOCK mode (transfer data on chunks) */ +- writel(QUP_INPUT_BLK_MODE | QUP_REPACK_EN, +- qup->base + QUP_IO_MODE); +- writel(len, qup->base + QUP_MX_INPUT_CNT); +- } +-} +- + static void qup_i2c_set_read_mode_v2(struct qup_i2c_dev *qup, int len) + { + int tx_len = qup->blk.tx_tag_len; +@@ -1086,44 +1097,27 @@ static void qup_i2c_set_read_mode_v2(str + } + } + +-static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg) +-{ +- u32 addr, len, val; +- +- addr = i2c_8bit_addr_from_msg(msg); +- +- /* 0 is used to specify a length 256 (QUP_READ_LIMIT) */ +- len = (msg->len == QUP_READ_LIMIT) ? 0 : msg->len; +- +- val = ((QUP_TAG_REC | len) << QUP_MSW_SHIFT) | QUP_TAG_START | addr; +- writel(val, qup->base + QUP_OUT_FIFO_BASE); +-} +- +- +-static int qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg) ++static void qup_i2c_read_rx_fifo_v1(struct qup_i2c_dev *qup) + { ++ struct qup_i2c_block *blk = &qup->blk; ++ struct i2c_msg *msg = qup->msg; + u32 val = 0; +- int idx; +- int ret = 0; ++ int idx = 0; + +- for (idx = 0; qup->pos < msg->len; idx++) { ++ while (blk->fifo_available && qup->pos < msg->len) { + if ((idx & 1) == 0) { +- /* Check that FIFO have data */ +- ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY, +- SET_BIT, 4 * ONE_BYTE); +- if (ret) +- return ret; +- + /* Reading 2 words at time */ + val = readl(qup->base + QUP_IN_FIFO_BASE); +- + msg->buf[qup->pos++] = val & 0xFF; + } else { + msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT; + } ++ idx++; ++ blk->fifo_available--; + } + +- return ret; ++ if (qup->pos == msg->len) ++ blk->rx_bytes_read = true; + } + + static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup, +@@ -1224,49 +1218,130 @@ err: + return ret; + } + +-static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg) ++static void qup_i2c_write_rx_tags_v1(struct qup_i2c_dev *qup) + { +- int ret; ++ struct i2c_msg *msg = qup->msg; ++ u32 addr, len, val; + +- qup->msg = msg; +- qup->pos = 0; ++ addr = i2c_8bit_addr_from_msg(msg); + +- enable_irq(qup->irq); +- qup_i2c_set_read_mode(qup, msg->len); ++ /* 0 is used to specify a length 256 (QUP_READ_LIMIT) */ ++ len = (msg->len == QUP_READ_LIMIT) ? 0 : msg->len; ++ ++ val = ((QUP_TAG_REC | len) << QUP_MSW_SHIFT) | QUP_TAG_START | addr; ++ writel(val, qup->base + QUP_OUT_FIFO_BASE); ++} ++ ++static void qup_i2c_conf_v1(struct qup_i2c_dev *qup) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ u32 qup_config = I2C_MINI_CORE | I2C_N_VAL; ++ u32 io_mode = QUP_REPACK_EN; ++ ++ blk->is_tx_blk_mode = ++ blk->total_tx_len > qup->out_fifo_sz ? true : false; ++ blk->is_rx_blk_mode = ++ blk->total_rx_len > qup->in_fifo_sz ? 
true : false; ++ ++ if (blk->is_tx_blk_mode) { ++ io_mode |= QUP_OUTPUT_BLK_MODE; ++ writel(0, qup->base + QUP_MX_WRITE_CNT); ++ writel(blk->total_tx_len, qup->base + QUP_MX_OUTPUT_CNT); ++ } else { ++ writel(0, qup->base + QUP_MX_OUTPUT_CNT); ++ writel(blk->total_tx_len, qup->base + QUP_MX_WRITE_CNT); ++ } ++ ++ if (blk->total_rx_len) { ++ if (blk->is_rx_blk_mode) { ++ io_mode |= QUP_INPUT_BLK_MODE; ++ writel(0, qup->base + QUP_MX_READ_CNT); ++ writel(blk->total_rx_len, qup->base + QUP_MX_INPUT_CNT); ++ } else { ++ writel(0, qup->base + QUP_MX_INPUT_CNT); ++ writel(blk->total_rx_len, qup->base + QUP_MX_READ_CNT); ++ } ++ } else { ++ qup_config |= QUP_NO_INPUT; ++ } ++ ++ writel(qup_config, qup->base + QUP_CONFIG); ++ writel(io_mode, qup->base + QUP_IO_MODE); ++} + ++static void qup_i2c_clear_blk_v1(struct qup_i2c_block *blk) ++{ ++ blk->tx_fifo_free = 0; ++ blk->fifo_available = 0; ++ blk->rx_bytes_read = false; ++} ++ ++static int qup_i2c_conf_xfer_v1(struct qup_i2c_dev *qup, bool is_rx) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ int ret; ++ ++ qup_i2c_clear_blk_v1(blk); ++ qup_i2c_conf_v1(qup); + ret = qup_i2c_change_state(qup, QUP_RUN_STATE); + if (ret) +- goto err; ++ return ret; + + writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL); + + ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); + if (ret) +- goto err; ++ return ret; ++ ++ reinit_completion(&qup->xfer); ++ enable_irq(qup->irq); ++ if (!blk->is_tx_blk_mode) { ++ blk->tx_fifo_free = qup->out_fifo_sz; + +- qup_i2c_issue_read(qup, msg); ++ if (is_rx) ++ qup_i2c_write_rx_tags_v1(qup); ++ else ++ qup_i2c_write_tx_fifo_v1(qup); ++ } + + ret = qup_i2c_change_state(qup, QUP_RUN_STATE); + if (ret) + goto err; + +- do { +- ret = qup_i2c_wait_for_complete(qup, msg); +- if (ret) +- goto err; ++ ret = qup_i2c_wait_for_complete(qup, qup->msg); ++ if (ret) ++ goto err; + +- ret = qup_i2c_read_fifo(qup, msg); +- if (ret) +- goto err; +- } while (qup->pos < msg->len); ++ ret = qup_i2c_bus_active(qup, ONE_BYTE); + + err: + disable_irq(qup->irq); +- qup->msg = NULL; +- + return ret; + } + ++static int qup_i2c_write_one(struct qup_i2c_dev *qup) ++{ ++ struct i2c_msg *msg = qup->msg; ++ struct qup_i2c_block *blk = &qup->blk; ++ ++ qup->pos = 0; ++ blk->total_tx_len = msg->len + 1; ++ blk->total_rx_len = 0; ++ ++ return qup_i2c_conf_xfer_v1(qup, false); ++} ++ ++static int qup_i2c_read_one(struct qup_i2c_dev *qup) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ ++ qup->pos = 0; ++ blk->total_tx_len = 2; ++ blk->total_rx_len = qup->msg->len; ++ ++ return qup_i2c_conf_xfer_v1(qup, true); ++} ++ + static int qup_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg msgs[], + int num) +@@ -1305,10 +1380,11 @@ static int qup_i2c_xfer(struct i2c_adapt + goto out; + } + ++ qup->msg = &msgs[idx]; + if (msgs[idx].flags & I2C_M_RD) +- ret = qup_i2c_read_one(qup, &msgs[idx]); ++ ret = qup_i2c_read_one(qup); + else +- ret = qup_i2c_write_one(qup, &msgs[idx]); ++ ret = qup_i2c_write_one(qup); + + if (ret) + break; +@@ -1487,6 +1563,10 @@ static int qup_i2c_probe(struct platform + if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) { + qup->adap.algo = &qup_i2c_algo; + qup->adap.quirks = &qup_i2c_quirks; ++ qup->is_qup_v1 = true; ++ qup->write_tx_fifo = qup_i2c_write_tx_fifo_v1; ++ qup->read_rx_fifo = qup_i2c_read_rx_fifo_v1; ++ qup->write_rx_tags = qup_i2c_write_rx_tags_v1; + } else { + qup->adap.algo = &qup_i2c_algo_v2; + ret = qup_i2c_req_dma(qup); diff --git 
a/target/linux/ipq40xx/patches-4.14/088-0013-i2c-qup-reorganization-of-driver-code-to-remove-poll.patch b/target/linux/ipq40xx/patches-4.14/088-0013-i2c-qup-reorganization-of-driver-code-to-remove-poll.patch new file mode 100644 index 0000000000..6d32882117 --- /dev/null +++ b/target/linux/ipq40xx/patches-4.14/088-0013-i2c-qup-reorganization-of-driver-code-to-remove-poll.patch @@ -0,0 +1,1154 @@ +From 7545c7dba169c4c29ba5f6ab8706267a84c0febe Mon Sep 17 00:00:00 2001 +From: Abhishek Sahu +Date: Mon, 12 Mar 2018 18:45:02 +0530 +Subject: [PATCH 13/13] i2c: qup: reorganization of driver code to remove + polling for qup v2 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Following are the major issues in current driver code + +1. The current driver simply assumes the transfer completion + whenever its gets any non-error interrupts and then simply do the + polling of available/free bytes in FIFO. +2. The block mode is not working properly since no handling in + being done for OUT_BLOCK_WRITE_REQ and IN_BLOCK_READ_READ. +3. An i2c transfer can contain multiple message and QUP v2 + supports reconfiguration during run in which the mode should be same + for all the sub transfer. Currently the mode is being programmed + before every sub transfer which is functionally wrong. If one message + is less than FIFO length and other message is greater than FIFO + length, then transfers will fail. + +Because of above, i2c v2 transfers of size greater than 64 are failing +with following error message + + i2c_qup 78b6000.i2c: timeout for fifo out full + +To make block mode working properly and move to use the interrupts +instead of polling, major code reorganization is required. Following +are the major changes done in this patch + +1. Remove the polling of TX FIFO free space and RX FIFO available + bytes and move to interrupts completely. QUP has QUP_MX_OUTPUT_DONE, + QUP_MX_INPUT_DONE, OUT_BLOCK_WRITE_REQ and IN_BLOCK_READ_REQ + interrupts to handle FIFO’s properly so check all these interrupts. +2. Determine the mode for transfer before starting by checking + all the tx/rx data length in each message. The complete message can be + transferred either in DMA mode or Programmed IO by FIFO/Block mode. + in DMA mode, both tx and rx uses same mode but in PIO mode, the TX and + RX can be in different mode. +3. During write, For FIFO mode, TX FIFO can be directly written + without checking for FIFO space. For block mode, the QUP will generate + OUT_BLOCK_WRITE_REQ interrupt whenever it has block size of available + space. +4. During read, both TX and RX FIFO will be used. TX will be used + for writing tags and RX will be used for receiving the data. In QUP, + TX and RX can operate in separate mode so configure modes accordingly. +5. For read FIFO mode, wait for QUP_MX_INPUT_DONE interrupt which + will be generated after all the bytes have been copied in RX FIFO. For + read Block mode, QUP will generate IN_BLOCK_READ_REQ interrupts + whenever it has block size of available data. +6. Split the transfer in chunk of one QUP block size(256 bytes) + and schedule each block separately. QUP v2 supports reconfiguration + during run in which QUP can transfer multiple blocks without issuing a + stop events. +7. Port the SMBus block read support for new code changes. 
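
Point 6 above relies on simple block-splitting arithmetic. A minimal sketch of that chunking (illustrative only; it assumes the driver's QUP_READ_LIMIT of 256 bytes and is not a copy of the driver code):

  #include <linux/kernel.h>

  static void qup_split_into_blocks(unsigned int msg_len)
  {
          unsigned int limit = 256;       /* QUP_READ_LIMIT */
          unsigned int blocks, rem, i;

          if (!msg_len)
                  return;

          blocks = DIV_ROUND_UP(msg_len, limit);
          rem = msg_len - (blocks - 1) * limit;   /* length of the last block */

          for (i = 0; i < blocks; i++) {
                  unsigned int tlen = (i == blocks - 1) ? rem : limit;

                  /* ...build tags and schedule one sub-transfer of tlen bytes... */
          }
  }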
+ +Signed-off-by: Abhishek Sahu +Reviewed-by: Sricharan R +Signed-off-by: Wolfram Sang +--- + drivers/i2c/busses/i2c-qup.c | 900 ++++++++++++++++++++--------------- + 1 file changed, 515 insertions(+), 385 deletions(-) + +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -141,17 +141,40 @@ + #define DEFAULT_SRC_CLK 20000000 + + /* ++ * Max tags length (start, stop and maximum 2 bytes address) for each QUP ++ * data transfer ++ */ ++#define QUP_MAX_TAGS_LEN 4 ++/* Max data length for each DATARD tags */ ++#define RECV_MAX_DATA_LEN 254 ++/* TAG length for DATA READ in RX FIFO */ ++#define READ_RX_TAGS_LEN 2 ++ ++/* + * count: no of blocks + * pos: current block number + * tx_tag_len: tx tag length for current block + * rx_tag_len: rx tag length for current block + * data_len: remaining data length for current message ++ * cur_blk_len: data length for current block + * total_tx_len: total tx length including tag bytes for current QUP transfer + * total_rx_len: total rx length including tag bytes for current QUP transfer ++ * tx_fifo_data_pos: current byte number in TX FIFO word + * tx_fifo_free: number of free bytes in current QUP block write. ++ * rx_fifo_data_pos: current byte number in RX FIFO word + * fifo_available: number of available bytes in RX FIFO for current + * QUP block read ++ * tx_fifo_data: QUP TX FIFO write works on word basis (4 bytes). New byte write ++ * to TX FIFO will be appended in this data and will be written to ++ * TX FIFO when all the 4 bytes are available. ++ * rx_fifo_data: QUP RX FIFO read works on word basis (4 bytes). This will ++ * contains the 4 bytes of RX data. ++ * cur_data: pointer to tell cur data position for current message ++ * cur_tx_tags: pointer to tell cur position in tags ++ * tx_tags_sent: all tx tag bytes have been written in FIFO word ++ * send_last_word: for tx FIFO, last word send is pending in current block + * rx_bytes_read: if all the bytes have been read from rx FIFO. ++ * rx_tags_fetched: all the rx tag bytes have been fetched from rx fifo word + * is_tx_blk_mode: whether tx uses block or FIFO mode in case of non BAM xfer. + * is_rx_blk_mode: whether rx uses block or FIFO mode in case of non BAM xfer. 
+ * tags: contains tx tag bytes for current QUP transfer +@@ -162,10 +185,20 @@ struct qup_i2c_block { + int tx_tag_len; + int rx_tag_len; + int data_len; ++ int cur_blk_len; + int total_tx_len; + int total_rx_len; ++ int tx_fifo_data_pos; + int tx_fifo_free; ++ int rx_fifo_data_pos; + int fifo_available; ++ u32 tx_fifo_data; ++ u32 rx_fifo_data; ++ u8 *cur_data; ++ u8 *cur_tx_tags; ++ bool tx_tags_sent; ++ bool send_last_word; ++ bool rx_tags_fetched; + bool rx_bytes_read; + bool is_tx_blk_mode; + bool is_rx_blk_mode; +@@ -198,6 +231,7 @@ struct qup_i2c_dev { + int out_blk_sz; + int in_blk_sz; + ++ int blk_xfer_limit; + unsigned long one_byte_t; + unsigned long xfer_timeout; + struct qup_i2c_block blk; +@@ -212,10 +246,10 @@ struct qup_i2c_dev { + + /* To check if this is the last msg */ + bool is_last; +- bool is_qup_v1; ++ bool is_smbus_read; + + /* To configure when bus is in run state */ +- int config_run; ++ u32 config_run; + + /* dma parameters */ + bool is_dma; +@@ -223,6 +257,8 @@ struct qup_i2c_dev { + bool use_dma; + unsigned int max_xfer_sg_len; + unsigned int tag_buf_pos; ++ /* The threshold length above which block mode will be used */ ++ unsigned int blk_mode_threshold; + struct dma_pool *dpool; + struct qup_i2c_tag start_tag; + struct qup_i2c_bam brx; +@@ -287,9 +323,6 @@ static irqreturn_t qup_i2c_interrupt(int + goto done; + } + +- if (!qup->is_qup_v1) +- goto done; +- + if (opflags & QUP_OUT_SVC_FLAG) { + writel(QUP_OUT_SVC_FLAG, qup->base + QUP_OPERATIONAL); + +@@ -416,108 +449,6 @@ static int qup_i2c_bus_active(struct qup + return ret; + } + +-/** +- * qup_i2c_wait_ready - wait for a give number of bytes in tx/rx path +- * @qup: The qup_i2c_dev device +- * @op: The bit/event to wait on +- * @val: value of the bit to wait on, 0 or 1 +- * @len: The length the bytes to be transferred +- */ +-static int qup_i2c_wait_ready(struct qup_i2c_dev *qup, int op, bool val, +- int len) +-{ +- unsigned long timeout; +- u32 opflags; +- u32 status; +- u32 shift = __ffs(op); +- int ret = 0; +- +- len *= qup->one_byte_t; +- /* timeout after a wait of twice the max time */ +- timeout = jiffies + len * 4; +- +- for (;;) { +- opflags = readl(qup->base + QUP_OPERATIONAL); +- status = readl(qup->base + QUP_I2C_STATUS); +- +- if (((opflags & op) >> shift) == val) { +- if ((op == QUP_OUT_NOT_EMPTY) && qup->is_last) { +- if (!(status & I2C_STATUS_BUS_ACTIVE)) { +- ret = 0; +- goto done; +- } +- } else { +- ret = 0; +- goto done; +- } +- } +- +- if (time_after(jiffies, timeout)) { +- ret = -ETIMEDOUT; +- goto done; +- } +- usleep_range(len, len * 2); +- } +- +-done: +- if (qup->bus_err || qup->qup_err) +- ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? 
-ENXIO : -EIO; +- +- return ret; +-} +- +-static void qup_i2c_set_write_mode_v2(struct qup_i2c_dev *qup, +- struct i2c_msg *msg) +-{ +- /* Number of entries to shift out, including the tags */ +- int total = msg->len + qup->blk.tx_tag_len; +- +- total |= qup->config_run; +- +- if (total < qup->out_fifo_sz) { +- /* FIFO mode */ +- writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE); +- writel(total, qup->base + QUP_MX_WRITE_CNT); +- } else { +- /* BLOCK mode (transfer data on chunks) */ +- writel(QUP_OUTPUT_BLK_MODE | QUP_REPACK_EN, +- qup->base + QUP_IO_MODE); +- writel(total, qup->base + QUP_MX_OUTPUT_CNT); +- } +-} +- +-static int check_for_fifo_space(struct qup_i2c_dev *qup) +-{ +- int ret; +- +- ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); +- if (ret) +- goto out; +- +- ret = qup_i2c_wait_ready(qup, QUP_OUT_FULL, +- RESET_BIT, 4 * ONE_BYTE); +- if (ret) { +- /* Fifo is full. Drain out the fifo */ +- ret = qup_i2c_change_state(qup, QUP_RUN_STATE); +- if (ret) +- goto out; +- +- ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY, +- RESET_BIT, 256 * ONE_BYTE); +- if (ret) { +- dev_err(qup->dev, "timeout for fifo out full"); +- goto out; +- } +- +- ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); +- if (ret) +- goto out; +- } +- +-out: +- return ret; +-} +- + static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup) + { + struct qup_i2c_block *blk = &qup->blk; +@@ -560,60 +491,17 @@ static void qup_i2c_write_tx_fifo_v1(str + static void qup_i2c_set_blk_data(struct qup_i2c_dev *qup, + struct i2c_msg *msg) + { +- memset(&qup->blk, 0, sizeof(qup->blk)); +- ++ qup->blk.pos = 0; + qup->blk.data_len = msg->len; +- qup->blk.count = (msg->len + QUP_READ_LIMIT - 1) / QUP_READ_LIMIT; +- +- /* 4 bytes for first block and 2 writes for rest */ +- qup->blk.tx_tag_len = 4 + (qup->blk.count - 1) * 2; +- +- /* There are 2 tag bytes that are read in to fifo for every block */ +- if (msg->flags & I2C_M_RD) +- qup->blk.rx_tag_len = qup->blk.count * 2; +-} +- +-static int qup_i2c_send_data(struct qup_i2c_dev *qup, int tlen, u8 *tbuf, +- int dlen, u8 *dbuf) +-{ +- u32 val = 0, idx = 0, pos = 0, i = 0, t; +- int len = tlen + dlen; +- u8 *buf = tbuf; +- int ret = 0; +- +- while (len > 0) { +- ret = check_for_fifo_space(qup); +- if (ret) +- return ret; +- +- t = (len >= 4) ? 
4 : len; +- +- while (idx < t) { +- if (!i && (pos >= tlen)) { +- buf = dbuf; +- pos = 0; +- i = 1; +- } +- val |= buf[pos++] << (idx++ * 8); +- } +- +- writel(val, qup->base + QUP_OUT_FIFO_BASE); +- idx = 0; +- val = 0; +- len -= 4; +- } +- +- ret = qup_i2c_change_state(qup, QUP_RUN_STATE); +- +- return ret; ++ qup->blk.count = DIV_ROUND_UP(msg->len, qup->blk_xfer_limit); + } + + static int qup_i2c_get_data_len(struct qup_i2c_dev *qup) + { + int data_len; + +- if (qup->blk.data_len > QUP_READ_LIMIT) +- data_len = QUP_READ_LIMIT; ++ if (qup->blk.data_len > qup->blk_xfer_limit) ++ data_len = qup->blk_xfer_limit; + else + data_len = qup->blk.data_len; + +@@ -630,9 +518,9 @@ static int qup_i2c_set_tags_smb(u16 addr + { + int len = 0; + +- if (msg->len > 1) { ++ if (qup->is_smbus_read) { + tags[len++] = QUP_TAG_V2_DATARD_STOP; +- tags[len++] = qup_i2c_get_data_len(qup) - 1; ++ tags[len++] = qup_i2c_get_data_len(qup); + } else { + tags[len++] = QUP_TAG_V2_START; + tags[len++] = addr & 0xff; +@@ -694,24 +582,6 @@ static int qup_i2c_set_tags(u8 *tags, st + return len; + } + +-static int qup_i2c_issue_xfer_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg) +-{ +- int data_len = 0, tag_len, index; +- int ret; +- +- tag_len = qup_i2c_set_tags(qup->blk.tags, qup, msg); +- index = msg->len - qup->blk.data_len; +- +- /* only tags are written for read */ +- if (!(msg->flags & I2C_M_RD)) +- data_len = qup_i2c_get_data_len(qup); +- +- ret = qup_i2c_send_data(qup, tag_len, qup->blk.tags, +- data_len, &msg->buf[index]); +- qup->blk.data_len -= data_len; +- +- return ret; +-} + + static void qup_i2c_bam_cb(void *data) + { +@@ -778,6 +648,7 @@ static int qup_i2c_bam_make_desc(struct + u32 i = 0, tlen, tx_len = 0; + u8 *tags; + ++ qup->blk_xfer_limit = QUP_READ_LIMIT; + qup_i2c_set_blk_data(qup, msg); + + blocks = qup->blk.count; +@@ -1026,7 +897,7 @@ static int qup_i2c_wait_for_complete(str + unsigned long left; + int ret = 0; + +- left = wait_for_completion_timeout(&qup->xfer, HZ); ++ left = wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout); + if (!left) { + writel(1, qup->base + QUP_SW_RESET); + ret = -ETIMEDOUT; +@@ -1038,65 +909,6 @@ static int qup_i2c_wait_for_complete(str + return ret; + } + +-static int qup_i2c_write_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg) +-{ +- int ret = 0; +- +- qup->msg = msg; +- qup->pos = 0; +- enable_irq(qup->irq); +- qup_i2c_set_blk_data(qup, msg); +- qup_i2c_set_write_mode_v2(qup, msg); +- +- ret = qup_i2c_change_state(qup, QUP_RUN_STATE); +- if (ret) +- goto err; +- +- writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL); +- +- do { +- ret = qup_i2c_issue_xfer_v2(qup, msg); +- if (ret) +- goto err; +- +- ret = qup_i2c_wait_for_complete(qup, msg); +- if (ret) +- goto err; +- +- qup->blk.pos++; +- } while (qup->blk.pos < qup->blk.count); +- +- ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY, RESET_BIT, ONE_BYTE); +- +-err: +- disable_irq(qup->irq); +- qup->msg = NULL; +- +- return ret; +-} +- +-static void qup_i2c_set_read_mode_v2(struct qup_i2c_dev *qup, int len) +-{ +- int tx_len = qup->blk.tx_tag_len; +- +- len += qup->blk.rx_tag_len; +- len |= qup->config_run; +- tx_len |= qup->config_run; +- +- if (len < qup->in_fifo_sz) { +- /* FIFO mode */ +- writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE); +- writel(tx_len, qup->base + QUP_MX_WRITE_CNT); +- writel(len, qup->base + QUP_MX_READ_CNT); +- } else { +- /* BLOCK mode (transfer data on chunks) */ +- writel(QUP_INPUT_BLK_MODE | QUP_REPACK_EN, +- qup->base + QUP_IO_MODE); +- writel(tx_len, qup->base + 
QUP_MX_OUTPUT_CNT); +- writel(len, qup->base + QUP_MX_INPUT_CNT); +- } +-} +- + static void qup_i2c_read_rx_fifo_v1(struct qup_i2c_dev *qup) + { + struct qup_i2c_block *blk = &qup->blk; +@@ -1120,104 +932,6 @@ static void qup_i2c_read_rx_fifo_v1(stru + blk->rx_bytes_read = true; + } + +-static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup, +- struct i2c_msg *msg) +-{ +- u32 val; +- int idx, pos = 0, ret = 0, total, msg_offset = 0; +- +- /* +- * If the message length is already read in +- * the first byte of the buffer, account for +- * that by setting the offset +- */ +- if (qup_i2c_check_msg_len(msg) && (msg->len > 1)) +- msg_offset = 1; +- total = qup_i2c_get_data_len(qup); +- total -= msg_offset; +- +- /* 2 extra bytes for read tags */ +- while (pos < (total + 2)) { +- /* Check that FIFO have data */ +- ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY, +- SET_BIT, 4 * ONE_BYTE); +- if (ret) { +- dev_err(qup->dev, "timeout for fifo not empty"); +- return ret; +- } +- val = readl(qup->base + QUP_IN_FIFO_BASE); +- +- for (idx = 0; idx < 4; idx++, val >>= 8, pos++) { +- /* first 2 bytes are tag bytes */ +- if (pos < 2) +- continue; +- +- if (pos >= (total + 2)) +- goto out; +- msg->buf[qup->pos + msg_offset] = val & 0xff; +- qup->pos++; +- } +- } +- +-out: +- qup->blk.data_len -= total; +- +- return ret; +-} +- +-static int qup_i2c_read_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg) +-{ +- int ret = 0; +- +- qup->msg = msg; +- qup->pos = 0; +- enable_irq(qup->irq); +- qup_i2c_set_blk_data(qup, msg); +- qup_i2c_set_read_mode_v2(qup, msg->len); +- +- ret = qup_i2c_change_state(qup, QUP_RUN_STATE); +- if (ret) +- goto err; +- +- writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL); +- +- do { +- ret = qup_i2c_issue_xfer_v2(qup, msg); +- if (ret) +- goto err; +- +- ret = qup_i2c_wait_for_complete(qup, msg); +- if (ret) +- goto err; +- +- ret = qup_i2c_read_fifo_v2(qup, msg); +- if (ret) +- goto err; +- +- qup->blk.pos++; +- +- /* Handle SMBus block read length */ +- if (qup_i2c_check_msg_len(msg) && (msg->len == 1)) { +- if (msg->buf[0] > I2C_SMBUS_BLOCK_MAX) { +- ret = -EPROTO; +- goto err; +- } +- msg->len += msg->buf[0]; +- qup->pos = 0; +- qup_i2c_set_blk_data(qup, msg); +- /* set tag length for block read */ +- qup->blk.tx_tag_len = 2; +- qup_i2c_set_read_mode_v2(qup, msg->buf[0]); +- } +- } while (qup->blk.pos < qup->blk.count); +- +-err: +- disable_irq(qup->irq); +- qup->msg = NULL; +- +- return ret; +-} +- + static void qup_i2c_write_rx_tags_v1(struct qup_i2c_dev *qup) + { + struct i2c_msg *msg = qup->msg; +@@ -1404,13 +1118,434 @@ out: + return ret; + } + ++/* ++ * Configure registers related with reconfiguration during run and call it ++ * before each i2c sub transfer. ++ */ ++static void qup_i2c_conf_count_v2(struct qup_i2c_dev *qup) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ u32 qup_config = I2C_MINI_CORE | I2C_N_VAL_V2; ++ ++ if (blk->is_tx_blk_mode) ++ writel(qup->config_run | blk->total_tx_len, ++ qup->base + QUP_MX_OUTPUT_CNT); ++ else ++ writel(qup->config_run | blk->total_tx_len, ++ qup->base + QUP_MX_WRITE_CNT); ++ ++ if (blk->total_rx_len) { ++ if (blk->is_rx_blk_mode) ++ writel(qup->config_run | blk->total_rx_len, ++ qup->base + QUP_MX_INPUT_CNT); ++ else ++ writel(qup->config_run | blk->total_rx_len, ++ qup->base + QUP_MX_READ_CNT); ++ } else { ++ qup_config |= QUP_NO_INPUT; ++ } ++ ++ writel(qup_config, qup->base + QUP_CONFIG); ++} ++ ++/* ++ * Configure registers related with transfer mode (FIFO/Block) ++ * before starting of i2c transfer. 
It will be called only once in ++ * QUP RESET state. ++ */ ++static void qup_i2c_conf_mode_v2(struct qup_i2c_dev *qup) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ u32 io_mode = QUP_REPACK_EN; ++ ++ if (blk->is_tx_blk_mode) { ++ io_mode |= QUP_OUTPUT_BLK_MODE; ++ writel(0, qup->base + QUP_MX_WRITE_CNT); ++ } else { ++ writel(0, qup->base + QUP_MX_OUTPUT_CNT); ++ } ++ ++ if (blk->is_rx_blk_mode) { ++ io_mode |= QUP_INPUT_BLK_MODE; ++ writel(0, qup->base + QUP_MX_READ_CNT); ++ } else { ++ writel(0, qup->base + QUP_MX_INPUT_CNT); ++ } ++ ++ writel(io_mode, qup->base + QUP_IO_MODE); ++} ++ ++/* Clear required variables before starting of any QUP v2 sub transfer. */ ++static void qup_i2c_clear_blk_v2(struct qup_i2c_block *blk) ++{ ++ blk->send_last_word = false; ++ blk->tx_tags_sent = false; ++ blk->tx_fifo_data = 0; ++ blk->tx_fifo_data_pos = 0; ++ blk->tx_fifo_free = 0; ++ ++ blk->rx_tags_fetched = false; ++ blk->rx_bytes_read = false; ++ blk->rx_fifo_data = 0; ++ blk->rx_fifo_data_pos = 0; ++ blk->fifo_available = 0; ++} ++ ++/* Receive data from RX FIFO for read message in QUP v2 i2c transfer. */ ++static void qup_i2c_recv_data(struct qup_i2c_dev *qup) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ int j; ++ ++ for (j = blk->rx_fifo_data_pos; ++ blk->cur_blk_len && blk->fifo_available; ++ blk->cur_blk_len--, blk->fifo_available--) { ++ if (j == 0) ++ blk->rx_fifo_data = readl(qup->base + QUP_IN_FIFO_BASE); ++ ++ *(blk->cur_data++) = blk->rx_fifo_data; ++ blk->rx_fifo_data >>= 8; ++ ++ if (j == 3) ++ j = 0; ++ else ++ j++; ++ } ++ ++ blk->rx_fifo_data_pos = j; ++} ++ ++/* Receive tags for read message in QUP v2 i2c transfer. */ ++static void qup_i2c_recv_tags(struct qup_i2c_dev *qup) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ ++ blk->rx_fifo_data = readl(qup->base + QUP_IN_FIFO_BASE); ++ blk->rx_fifo_data >>= blk->rx_tag_len * 8; ++ blk->rx_fifo_data_pos = blk->rx_tag_len; ++ blk->fifo_available -= blk->rx_tag_len; ++} ++ ++/* ++ * Read the data and tags from RX FIFO. Since in read case, the tags will be ++ * preceded by received data bytes so ++ * 1. Check if rx_tags_fetched is false i.e. the start of QUP block so receive ++ * all tag bytes and discard that. ++ * 2. Read the data from RX FIFO. When all the data bytes have been read then ++ * set rx_bytes_read to true. ++ */ ++static void qup_i2c_read_rx_fifo_v2(struct qup_i2c_dev *qup) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ ++ if (!blk->rx_tags_fetched) { ++ qup_i2c_recv_tags(qup); ++ blk->rx_tags_fetched = true; ++ } ++ ++ qup_i2c_recv_data(qup); ++ if (!blk->cur_blk_len) ++ blk->rx_bytes_read = true; ++} ++ ++/* ++ * Write bytes in TX FIFO for write message in QUP v2 i2c transfer. QUP TX FIFO ++ * write works on word basis (4 bytes). Append new data byte write for TX FIFO ++ * in tx_fifo_data and write to TX FIFO when all the 4 bytes are present. ++ */ ++static void ++qup_i2c_write_blk_data(struct qup_i2c_dev *qup, u8 **data, unsigned int *len) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ unsigned int j; ++ ++ for (j = blk->tx_fifo_data_pos; *len && blk->tx_fifo_free; ++ (*len)--, blk->tx_fifo_free--) { ++ blk->tx_fifo_data |= *(*data)++ << (j * 8); ++ if (j == 3) { ++ writel(blk->tx_fifo_data, ++ qup->base + QUP_OUT_FIFO_BASE); ++ blk->tx_fifo_data = 0x0; ++ j = 0; ++ } else { ++ j++; ++ } ++ } ++ ++ blk->tx_fifo_data_pos = j; ++} ++ ++/* Transfer tags for read message in QUP v2 i2c transfer. 
*/ ++static void qup_i2c_write_rx_tags_v2(struct qup_i2c_dev *qup) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ ++ qup_i2c_write_blk_data(qup, &blk->cur_tx_tags, &blk->tx_tag_len); ++ if (blk->tx_fifo_data_pos) ++ writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE); ++} ++ ++/* ++ * Write the data and tags in TX FIFO. Since in write case, both tags and data ++ * need to be written and QUP write tags can have maximum 256 data length, so ++ * ++ * 1. Check if tx_tags_sent is false i.e. the start of QUP block so write the ++ * tags to TX FIFO and set tx_tags_sent to true. ++ * 2. Check if send_last_word is true. It will be set when last few data bytes ++ * (less than 4 bytes) are reamining to be written in FIFO because of no FIFO ++ * space. All this data bytes are available in tx_fifo_data so write this ++ * in FIFO. ++ * 3. Write the data to TX FIFO and check for cur_blk_len. If it is non zero ++ * then more data is pending otherwise following 3 cases can be possible ++ * a. if tx_fifo_data_pos is zero i.e. all the data bytes in this block ++ * have been written in TX FIFO so nothing else is required. ++ * b. tx_fifo_free is non zero i.e tx FIFO is free so copy the remaining data ++ * from tx_fifo_data to tx FIFO. Since, qup_i2c_write_blk_data do write ++ * in 4 bytes and FIFO space is in multiple of 4 bytes so tx_fifo_free ++ * will be always greater than or equal to 4 bytes. ++ * c. tx_fifo_free is zero. In this case, last few bytes (less than 4 ++ * bytes) are copied to tx_fifo_data but couldn't be sent because of ++ * FIFO full so make send_last_word true. ++ */ ++static void qup_i2c_write_tx_fifo_v2(struct qup_i2c_dev *qup) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ ++ if (!blk->tx_tags_sent) { ++ qup_i2c_write_blk_data(qup, &blk->cur_tx_tags, ++ &blk->tx_tag_len); ++ blk->tx_tags_sent = true; ++ } ++ ++ if (blk->send_last_word) ++ goto send_last_word; ++ ++ qup_i2c_write_blk_data(qup, &blk->cur_data, &blk->cur_blk_len); ++ if (!blk->cur_blk_len) { ++ if (!blk->tx_fifo_data_pos) ++ return; ++ ++ if (blk->tx_fifo_free) ++ goto send_last_word; ++ ++ blk->send_last_word = true; ++ } ++ ++ return; ++ ++send_last_word: ++ writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE); ++} ++ ++/* ++ * Main transfer function which read or write i2c data. ++ * The QUP v2 supports reconfiguration during run in which multiple i2c sub ++ * transfers can be scheduled. ++ */ ++static int ++qup_i2c_conf_xfer_v2(struct qup_i2c_dev *qup, bool is_rx, bool is_first, ++ bool change_pause_state) ++{ ++ struct qup_i2c_block *blk = &qup->blk; ++ struct i2c_msg *msg = qup->msg; ++ int ret; ++ ++ /* ++ * Check if its SMBus Block read for which the top level read will be ++ * done into 2 QUP reads. One with message length 1 while other one is ++ * with actual length. ++ */ ++ if (qup_i2c_check_msg_len(msg)) { ++ if (qup->is_smbus_read) { ++ /* ++ * If the message length is already read in ++ * the first byte of the buffer, account for ++ * that by setting the offset ++ */ ++ blk->cur_data += 1; ++ is_first = false; ++ } else { ++ change_pause_state = false; ++ } ++ } ++ ++ qup->config_run = is_first ? 
0 : QUP_I2C_MX_CONFIG_DURING_RUN; ++ ++ qup_i2c_clear_blk_v2(blk); ++ qup_i2c_conf_count_v2(qup); ++ ++ /* If it is first sub transfer, then configure i2c bus clocks */ ++ if (is_first) { ++ ret = qup_i2c_change_state(qup, QUP_RUN_STATE); ++ if (ret) ++ return ret; ++ ++ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL); ++ ++ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); ++ if (ret) ++ return ret; ++ } ++ ++ reinit_completion(&qup->xfer); ++ enable_irq(qup->irq); ++ /* ++ * In FIFO mode, tx FIFO can be written directly while in block mode the ++ * it will be written after getting OUT_BLOCK_WRITE_REQ interrupt ++ */ ++ if (!blk->is_tx_blk_mode) { ++ blk->tx_fifo_free = qup->out_fifo_sz; ++ ++ if (is_rx) ++ qup_i2c_write_rx_tags_v2(qup); ++ else ++ qup_i2c_write_tx_fifo_v2(qup); ++ } ++ ++ ret = qup_i2c_change_state(qup, QUP_RUN_STATE); ++ if (ret) ++ goto err; ++ ++ ret = qup_i2c_wait_for_complete(qup, msg); ++ if (ret) ++ goto err; ++ ++ /* Move to pause state for all the transfers, except last one */ ++ if (change_pause_state) { ++ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); ++ if (ret) ++ goto err; ++ } ++ ++err: ++ disable_irq(qup->irq); ++ return ret; ++} ++ ++/* ++ * Transfer one read/write message in i2c transfer. It splits the message into ++ * multiple of blk_xfer_limit data length blocks and schedule each ++ * QUP block individually. ++ */ ++static int qup_i2c_xfer_v2_msg(struct qup_i2c_dev *qup, int msg_id, bool is_rx) ++{ ++ int ret = 0; ++ unsigned int data_len, i; ++ struct i2c_msg *msg = qup->msg; ++ struct qup_i2c_block *blk = &qup->blk; ++ u8 *msg_buf = msg->buf; ++ ++ qup->blk_xfer_limit = is_rx ? RECV_MAX_DATA_LEN : QUP_READ_LIMIT; ++ qup_i2c_set_blk_data(qup, msg); ++ ++ for (i = 0; i < blk->count; i++) { ++ data_len = qup_i2c_get_data_len(qup); ++ blk->pos = i; ++ blk->cur_tx_tags = blk->tags; ++ blk->cur_blk_len = data_len; ++ blk->tx_tag_len = ++ qup_i2c_set_tags(blk->cur_tx_tags, qup, qup->msg); ++ ++ blk->cur_data = msg_buf; ++ ++ if (is_rx) { ++ blk->total_tx_len = blk->tx_tag_len; ++ blk->rx_tag_len = 2; ++ blk->total_rx_len = blk->rx_tag_len + data_len; ++ } else { ++ blk->total_tx_len = blk->tx_tag_len + data_len; ++ blk->total_rx_len = 0; ++ } ++ ++ ret = qup_i2c_conf_xfer_v2(qup, is_rx, !msg_id && !i, ++ !qup->is_last || i < blk->count - 1); ++ if (ret) ++ return ret; ++ ++ /* Handle SMBus block read length */ ++ if (qup_i2c_check_msg_len(msg) && msg->len == 1 && ++ !qup->is_smbus_read) { ++ if (msg->buf[0] > I2C_SMBUS_BLOCK_MAX) ++ return -EPROTO; ++ ++ msg->len = msg->buf[0]; ++ qup->is_smbus_read = true; ++ ret = qup_i2c_xfer_v2_msg(qup, msg_id, true); ++ qup->is_smbus_read = false; ++ if (ret) ++ return ret; ++ ++ msg->len += 1; ++ } ++ ++ msg_buf += data_len; ++ blk->data_len -= qup->blk_xfer_limit; ++ } ++ ++ return ret; ++} ++ ++/* ++ * QUP v2 supports 3 modes ++ * Programmed IO using FIFO mode : Less than FIFO size ++ * Programmed IO using Block mode : Greater than FIFO size ++ * DMA using BAM : Appropriate for any transaction size but the address should ++ * be DMA applicable ++ * ++ * This function determines the mode which will be used for this transfer. An ++ * i2c transfer contains multiple message. Following are the rules to determine ++ * the mode used. ++ * 1. Determine complete length, maximum tx and rx length for complete transfer. ++ * 2. If complete transfer length is greater than fifo size then use the DMA ++ * mode. ++ * 3. 
In FIFO or block mode, tx and rx can operate in different mode so check ++ * for maximum tx and rx length to determine mode. ++ */ ++static int ++qup_i2c_determine_mode_v2(struct qup_i2c_dev *qup, ++ struct i2c_msg msgs[], int num) ++{ ++ int idx; ++ bool no_dma = false; ++ unsigned int max_tx_len = 0, max_rx_len = 0, total_len = 0; ++ ++ /* All i2c_msgs should be transferred using either dma or cpu */ ++ for (idx = 0; idx < num; idx++) { ++ if (msgs[idx].len == 0) ++ return -EINVAL; ++ ++ if (msgs[idx].flags & I2C_M_RD) ++ max_rx_len = max_t(unsigned int, max_rx_len, ++ msgs[idx].len); ++ else ++ max_tx_len = max_t(unsigned int, max_tx_len, ++ msgs[idx].len); ++ ++ if (is_vmalloc_addr(msgs[idx].buf)) ++ no_dma = true; ++ ++ total_len += msgs[idx].len; ++ } ++ ++ if (!no_dma && qup->is_dma && ++ (total_len > qup->out_fifo_sz || total_len > qup->in_fifo_sz)) { ++ qup->use_dma = true; ++ } else { ++ qup->blk.is_tx_blk_mode = max_tx_len > qup->out_fifo_sz - ++ QUP_MAX_TAGS_LEN ? true : false; ++ qup->blk.is_rx_blk_mode = max_rx_len > qup->in_fifo_sz - ++ READ_RX_TAGS_LEN ? true : false; ++ } ++ ++ return 0; ++} ++ + static int qup_i2c_xfer_v2(struct i2c_adapter *adap, + struct i2c_msg msgs[], + int num) + { + struct qup_i2c_dev *qup = i2c_get_adapdata(adap); + int ret, idx = 0; +- unsigned int total_len = 0; + + qup->bus_err = 0; + qup->qup_err = 0; +@@ -1419,6 +1554,10 @@ static int qup_i2c_xfer_v2(struct i2c_ad + if (ret < 0) + goto out; + ++ ret = qup_i2c_determine_mode_v2(qup, msgs, num); ++ if (ret) ++ goto out; ++ + writel(1, qup->base + QUP_SW_RESET); + ret = qup_i2c_poll_state(qup, QUP_RESET_STATE); + if (ret) +@@ -1428,60 +1567,35 @@ static int qup_i2c_xfer_v2(struct i2c_ad + writel(I2C_MINI_CORE | I2C_N_VAL_V2, qup->base + QUP_CONFIG); + writel(QUP_V2_TAGS_EN, qup->base + QUP_I2C_MASTER_GEN); + +- if ((qup->is_dma)) { +- /* All i2c_msgs should be transferred using either dma or cpu */ +- for (idx = 0; idx < num; idx++) { +- if (msgs[idx].len == 0) { +- ret = -EINVAL; +- goto out; +- } +- +- if (is_vmalloc_addr(msgs[idx].buf)) +- break; +- +- total_len += msgs[idx].len; +- } +- +- if (idx == num && (total_len > qup->out_fifo_sz || +- total_len > qup->in_fifo_sz)) +- qup->use_dma = true; ++ if (qup_i2c_poll_state_i2c_master(qup)) { ++ ret = -EIO; ++ goto out; + } + +- idx = 0; ++ if (qup->use_dma) { ++ reinit_completion(&qup->xfer); ++ ret = qup_i2c_bam_xfer(adap, &msgs[0], num); ++ qup->use_dma = false; ++ } else { ++ qup_i2c_conf_mode_v2(qup); + +- do { +- if (msgs[idx].len == 0) { +- ret = -EINVAL; +- goto out; +- } ++ for (idx = 0; idx < num; idx++) { ++ qup->msg = &msgs[idx]; ++ qup->is_last = idx == (num - 1); + +- if (qup_i2c_poll_state_i2c_master(qup)) { +- ret = -EIO; +- goto out; ++ ret = qup_i2c_xfer_v2_msg(qup, idx, ++ !!(msgs[idx].flags & I2C_M_RD)); ++ if (ret) ++ break; + } ++ qup->msg = NULL; ++ } + +- qup->is_last = (idx == (num - 1)); +- if (idx) +- qup->config_run = QUP_I2C_MX_CONFIG_DURING_RUN; +- else +- qup->config_run = 0; +- +- reinit_completion(&qup->xfer); +- +- if (qup->use_dma) { +- ret = qup_i2c_bam_xfer(adap, &msgs[idx], num); +- qup->use_dma = false; +- break; +- } else { +- if (msgs[idx].flags & I2C_M_RD) +- ret = qup_i2c_read_one_v2(qup, &msgs[idx]); +- else +- ret = qup_i2c_write_one_v2(qup, &msgs[idx]); +- } +- } while ((idx++ < (num - 1)) && !ret); ++ if (!ret) ++ ret = qup_i2c_bus_active(qup, ONE_BYTE); + + if (!ret) +- ret = qup_i2c_change_state(qup, QUP_RESET_STATE); ++ qup_i2c_change_state(qup, QUP_RESET_STATE); + + if (ret == 0) + ret = num; 
+@@ -1545,6 +1659,7 @@ static int qup_i2c_probe(struct platform + u32 src_clk_freq = DEFAULT_SRC_CLK; + u32 clk_freq = DEFAULT_CLK_FREQ; + int blocks; ++ bool is_qup_v1; + + qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL); + if (!qup) +@@ -1563,12 +1678,10 @@ static int qup_i2c_probe(struct platform + if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) { + qup->adap.algo = &qup_i2c_algo; + qup->adap.quirks = &qup_i2c_quirks; +- qup->is_qup_v1 = true; +- qup->write_tx_fifo = qup_i2c_write_tx_fifo_v1; +- qup->read_rx_fifo = qup_i2c_read_rx_fifo_v1; +- qup->write_rx_tags = qup_i2c_write_rx_tags_v1; ++ is_qup_v1 = true; + } else { + qup->adap.algo = &qup_i2c_algo_v2; ++ is_qup_v1 = false; + ret = qup_i2c_req_dma(qup); + + if (ret == -EPROBE_DEFER) +@@ -1694,14 +1807,31 @@ nodma: + ret = -EIO; + goto fail; + } +- qup->out_blk_sz = blk_sizes[size] / 2; ++ qup->out_blk_sz = blk_sizes[size]; + + size = QUP_INPUT_BLOCK_SIZE(io_mode); + if (size >= ARRAY_SIZE(blk_sizes)) { + ret = -EIO; + goto fail; + } +- qup->in_blk_sz = blk_sizes[size] / 2; ++ qup->in_blk_sz = blk_sizes[size]; ++ ++ if (is_qup_v1) { ++ /* ++ * in QUP v1, QUP_CONFIG uses N as 15 i.e 16 bits constitutes a ++ * single transfer but the block size is in bytes so divide the ++ * in_blk_sz and out_blk_sz by 2 ++ */ ++ qup->in_blk_sz /= 2; ++ qup->out_blk_sz /= 2; ++ qup->write_tx_fifo = qup_i2c_write_tx_fifo_v1; ++ qup->read_rx_fifo = qup_i2c_read_rx_fifo_v1; ++ qup->write_rx_tags = qup_i2c_write_rx_tags_v1; ++ } else { ++ qup->write_tx_fifo = qup_i2c_write_tx_fifo_v2; ++ qup->read_rx_fifo = qup_i2c_read_rx_fifo_v2; ++ qup->write_rx_tags = qup_i2c_write_rx_tags_v2; ++ } + + size = QUP_OUTPUT_FIFO_SIZE(io_mode); + qup->out_fifo_sz = qup->out_blk_sz * (2 << size); -- cgit v1.2.3
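For readers reviewing the series, a few stand-alone sketches of the new QUP v2 transfer logic follow. First, the mode-selection rule that 088-0013 adds in qup_i2c_determine_mode_v2(): use BAM/DMA when the summed length of all messages exceeds either FIFO, otherwise choose block or FIFO mode per direction. This is an illustration only, not driver code; the xfer_msg/mode_result structures, the pick_mode() helper and the 64-byte FIFO size in main() are assumptions made for the example, while QUP_MAX_TAGS_LEN, READ_RX_TAGS_LEN and the comparisons mirror the patch.

#include <stdbool.h>
#include <stdio.h>

#define QUP_MAX_TAGS_LEN  4   /* start, stop and up to 2 address bytes per transfer */
#define READ_RX_TAGS_LEN  2   /* tag bytes read back for a DATA READ */

struct xfer_msg {
	unsigned int len;
	bool is_read;
};

struct mode_result {
	bool use_dma;
	bool tx_blk_mode;
	bool rx_blk_mode;
};

/* Mirror of the decision in qup_i2c_determine_mode_v2() (simplified). */
static struct mode_result pick_mode(const struct xfer_msg *msgs, int num,
				    unsigned int out_fifo_sz,
				    unsigned int in_fifo_sz, bool have_dma)
{
	unsigned int max_tx = 0, max_rx = 0, total = 0;
	struct mode_result res = { 0 };
	int i;

	for (i = 0; i < num; i++) {
		if (msgs[i].is_read && msgs[i].len > max_rx)
			max_rx = msgs[i].len;
		else if (!msgs[i].is_read && msgs[i].len > max_tx)
			max_tx = msgs[i].len;
		total += msgs[i].len;
	}

	if (have_dma && (total > out_fifo_sz || total > in_fifo_sz)) {
		res.use_dma = true;
	} else {
		/* tag bytes share the FIFO with data, so subtract them */
		res.tx_blk_mode = max_tx > out_fifo_sz - QUP_MAX_TAGS_LEN;
		res.rx_blk_mode = max_rx > in_fifo_sz - READ_RX_TAGS_LEN;
	}

	return res;
}

int main(void)
{
	/* e.g. a 2-byte register write followed by a 100-byte read */
	struct xfer_msg msgs[] = { { 2, false }, { 100, true } };
	struct mode_result r = pick_mode(msgs, 2, 64, 64, false);

	printf("dma=%d tx_blk=%d rx_blk=%d\n", r.use_dma, r.tx_blk_mode, r.rx_blk_mode);
	return 0;
}

Without DMA, the 100-byte read above falls back to RX block mode because it no longer fits in the assumed 64-byte FIFO alongside the read tags, which is the case that previously failed with the "timeout for fifo out full" error.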
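The per-message block splitting done by the new qup_i2c_xfer_v2_msg() follows directly from the defines added by the patch: a read carries at most RECV_MAX_DATA_LEN (254) data bytes per DATARD tag and returns READ_RX_TAGS_LEN (2) tag bytes per block in the RX FIFO, while a write is split on the 256-byte QUP block size quoted in the commit message. The sketch below is a hypothetical stand-alone rewrite of that arithmetic, not the driver function itself.

#include <stdbool.h>
#include <stdio.h>

#define QUP_READ_LIMIT     256  /* write-side block limit (QUP block size) */
#define RECV_MAX_DATA_LEN  254  /* max data bytes per DATARD tag */
#define READ_RX_TAGS_LEN   2    /* tag bytes returned in the RX FIFO per block */

/* Print how one message would be split into QUP sub-transfers. */
static void show_blocks(unsigned int len, bool is_read)
{
	unsigned int limit = is_read ? RECV_MAX_DATA_LEN : QUP_READ_LIMIT;
	unsigned int count = (len + limit - 1) / limit;   /* DIV_ROUND_UP */
	unsigned int i, remaining = len;

	printf("%u-byte %s -> %u block(s)\n", len, is_read ? "read" : "write", count);

	for (i = 0; i < count; i++) {
		unsigned int data = remaining > limit ? limit : remaining;
		unsigned int rx = is_read ? data + READ_RX_TAGS_LEN : 0;

		printf("  block %u: %u data bytes, %u RX FIFO bytes\n", i, data, rx);
		remaining -= data;
	}
}

int main(void)
{
	show_blocks(600, true);   /* 254 + 254 + 92 data bytes */
	show_blocks(600, false);  /* 256 + 256 + 88 data bytes */
	return 0;
}

A 600-byte read therefore becomes three sub-transfers, each scheduled as its own QUP block and reconfigured during run, with a move to the pause state after every block except the last.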
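Finally, the FIFO accesses in the new qup_i2c_write_blk_data() and qup_i2c_recv_data() operate on 32-bit words: outgoing bytes are accumulated LSB-first until four are available and then written as one word, and received words are unpacked the same way. The helpers below are simplified stand-ins that only demonstrate that packing; the real driver additionally carries partial words across interrupts via tx_fifo_data_pos and rx_fifo_data_pos.

#include <stdint.h>
#include <stdio.h>

/* Pack up to the next 4 bytes of 'data' into one FIFO word, LSB first. */
static uint32_t pack_fifo_word(const uint8_t *data, unsigned int len)
{
	uint32_t word = 0;
	unsigned int j;

	for (j = 0; j < 4 && j < len; j++)
		word |= (uint32_t)data[j] << (j * 8);

	return word;
}

/* Unpack one received FIFO word back into up to 4 bytes. */
static void unpack_fifo_word(uint32_t word, uint8_t *out, unsigned int len)
{
	unsigned int j;

	for (j = 0; j < 4 && j < len; j++) {
		out[j] = word & 0xff;
		word >>= 8;
	}
}

int main(void)
{
	uint8_t msg[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	uint8_t back[6];
	uint32_t w0 = pack_fifo_word(msg, 6);      /* 0x44332211 */
	uint32_t w1 = pack_fifo_word(msg + 4, 2);  /* 0x00006655 */

	unpack_fifo_word(w0, back, 4);
	unpack_fifo_word(w1, back + 4, 2);
	printf("w0=0x%08x w1=0x%08x last=0x%02x\n",
	       (unsigned int)w0, (unsigned int)w1, back[5]);
	return 0;
}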