author		Linus Torvalds <torvalds@linux-foundation.org>	2025-10-01 11:46:31 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-10-01 11:46:31 -0700
commit		ea1c6c592522208df1dcac9e8f1deb7cc56a51b7 (patch)
tree		0a03e0f3a28877951a8bea54661d1b0dc151a522
parent		ad6657804c10f794228461683b6cf1585a313ac9 (diff)
parent		2bfb20b65d9bc1d0de58f8c28ca9d6f1d27bbc01 (diff)
Merge tag 'spi-v6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi updates from Mark Brown:
 "There's one big core change in this release: Jonas Gorski has
  addressed the issues with multiple chip selects, which makes things
  more robust and stable. Otherwise there's quite a bit of driver work;
  as well as some new drivers, several existing drivers have had quite
  a bit of work done on them. Possibly the most interesting thing is
  the VirtIO driver, which is apparently useful for some automotive
  applications that want to keep as small and robust a host system as
  they can, moving less critical functionality into guests.

   - James Clark has done some substantial updates on the Freescale
     DSPI driver, porting in code from the BSP and building on top of
     that to fix some bugs and increase performance

   - Jonas Gorski has fixed the issues with handling multiple chip
     selects, making things more robust and scalable

   - Support for higher performance modes in the NXP FSPI driver from
     Haibo Chen

   - Removal of the obsolete S3C2443 driver, whose underlying SoC
     support has been removed from the kernel

   - Support for Amlogic A113L2, Atmel SAMA7D65 and SAM9x7, and for
     VirtIO controllers"

* tag 'spi-v6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (74 commits)
  spi: ljca: Remove Wentong's e-mail address
  spi: rename SPI_CS_CNT_MAX => SPI_DEVICE_CS_CNT_MAX
  spi: reduce device chip select limit again
  spi: don't check spi_controller::num_chipselect when parsing a dt device
  spi: drop check for validity of device chip selects
  spi: move unused device CS initialization to __spi_add_device()
  spi: keep track of number of chipselects in spi_device
  spi: fix return code when spi device has too many chipselects
  SPI: Add virtio SPI driver
  virtio-spi: Add virtio-spi.h
  virtio: Add ID for virtio SPI
  spi: rpc-if: Add resume support for RZ/G3E
  spi: rpc-if: Drop deprecated SIMPLE_DEV_PM_OPS
  spi: spi-qpic-snand: simplify clock handling by using devm_clk_get_enabled()
  spi: spi-nxp-fspi: Add OCT-DTR mode support
  spi: spi-nxp-fspi: add the support for sample data from DQS pad
  spi: spi-nxp-fspi: Add the DDR LUT command support
  spi: spi-nxp-fspi: set back to dll override mode when clock rate < 100MHz
  spi: spi-nxp-fspi: extract function nxp_fspi_dll_override()
  spi: atmel-quadspi: Add support for sama7d65 QSPI
  ...
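The chip-select rework called out above renames SPI_CS_CNT_MAX to SPI_DEVICE_CS_CNT_MAX and has the core keep track of each device's chip selects (see the shortlog entries). As a rough, hedged sketch only, and not part of this merge, the snippet below shows how a driver might walk the chip-select slots of a spi_device using the existing spi_get_chipselect() accessor and the cs_index_mask field; the helper function itself is hypothetical, and the loop bound simply reuses the renamed constant from the commit titles.

#include <linux/bits.h>
#include <linux/spi/spi.h>

/* Hypothetical helper: log every physical CS line bound to one device. */
static void example_show_device_cs(struct spi_device *spi)
{
	u8 idx;

	for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++) {
		/* cs_index_mask marks which chip-select slots are in use */
		if (!(spi->cs_index_mask & BIT(idx)))
			continue;

		dev_info(&spi->dev, "CS slot %u -> physical CS %u\n",
			 idx, spi_get_chipselect(spi, idx));
	}
}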
-rw-r--r--  Documentation/devicetree/bindings/spi/amlogic,a4-spifc.yaml      82
-rw-r--r--  Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml  11
-rw-r--r--  Documentation/devicetree/bindings/spi/atmel,quadspi.yaml          3
-rw-r--r--  Documentation/devicetree/bindings/spi/samsung,spi.yaml            1
-rw-r--r--  MAINTAINERS                                                      25
-rw-r--r--  drivers/spi/Kconfig                                              24
-rw-r--r--  drivers/spi/Makefile                                              2
-rw-r--r--  drivers/spi/atmel-quadspi.c                                     134
-rw-r--r--  drivers/spi/spi-altera-platform.c                                 1
-rw-r--r--  drivers/spi/spi-amd-pci.c                                         5
-rw-r--r--  drivers/spi/spi-amd.c                                             2
-rw-r--r--  drivers/spi/spi-amlogic-spifc-a4.c                             1222
-rw-r--r--  drivers/spi/spi-amlogic-spisg.c                                   4
-rw-r--r--  drivers/spi/spi-atmel.c                                          78
-rw-r--r--  drivers/spi/spi-axi-spi-engine.c                                 17
-rw-r--r--  drivers/spi/spi-bcm2835.c                                         2
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c                                36
-rw-r--r--  drivers/spi/spi-fsl-dspi.c                                      232
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c                                       8
-rw-r--r--  drivers/spi/spi-ljca.c                                            2
-rw-r--r--  drivers/spi/spi-loopback-test.c                                  12
-rw-r--r--  drivers/spi/spi-microchip-core-qspi.c                             3
-rw-r--r--  drivers/spi/spi-microchip-core.c                                  3
-rw-r--r--  drivers/spi/spi-mt65xx.c                                         30
-rw-r--r--  drivers/spi/spi-mtk-snfi.c                                        1
-rw-r--r--  drivers/spi/spi-mxs.c                                             2
-rw-r--r--  drivers/spi/spi-npcm-fiu.c                                        6
-rw-r--r--  drivers/spi/spi-nxp-fspi.c                                      117
-rw-r--r--  drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c            5
-rw-r--r--  drivers/spi/spi-pl022.c                                          13
-rw-r--r--  drivers/spi/spi-pxa2xx.c                                          2
-rw-r--r--  drivers/spi/spi-qpic-snand.c                                     58
-rw-r--r--  drivers/spi/spi-rb4xx.c                                          36
-rw-r--r--  drivers/spi/spi-rpc-if.c                                         12
-rw-r--r--  drivers/spi/spi-s3c64xx.c                                        19
-rw-r--r--  drivers/spi/spi-sunplus-sp7021.c                                  6
-rw-r--r--  drivers/spi/spi-virtio.c                                        431
-rw-r--r--  drivers/spi/spi.c                                                85
-rw-r--r--  include/linux/adi-axi-common.h                                   21
-rw-r--r--  include/linux/spi/spi.h                                          16
-rw-r--r--  include/uapi/linux/virtio_ids.h                                   1
-rw-r--r--  include/uapi/linux/virtio_spi.h                                 181
42 files changed, 2583 insertions, 368 deletions
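Among the larger additions in the diffstat are the Amlogic A4 SPIFC driver and the virtio SPI driver. The SPIFC driver implements only the spi-mem interface rather than generic SPI (per its Kconfig help further down), so, as an illustrative aside that is not part of the patch, here is roughly how a spi-mem client issues a request with the in-tree spi_mem_op helpers; the 0x6b opcode matches the SPIFLASH_RD_QUAD command the new driver recognises, while the function name and its parameters are placeholders.

#include <linux/spi/spi-mem.h>

/* Illustrative only: a quad-output read-from-cache as a spi-mem operation. */
static int example_quad_read(struct spi_mem *mem, u16 column, void *buf, size_t len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),		/* RD_QUAD opcode, 1-bit cmd */
			   SPI_MEM_OP_ADDR(2, column, 1),	/* 2-byte column address */
			   SPI_MEM_OP_DUMMY(1, 1),		/* one dummy byte (8 cycles) */
			   SPI_MEM_OP_DATA_IN(len, buf, 4));	/* read data on 4 lines */
	int ret;

	/* Let the controller shrink the transfer if it has to. */
	ret = spi_mem_adjust_op_size(mem, &op);
	if (ret)
		return ret;

	return spi_mem_exec_op(mem, &op);
}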
diff --git a/Documentation/devicetree/bindings/spi/amlogic,a4-spifc.yaml b/Documentation/devicetree/bindings/spi/amlogic,a4-spifc.yaml
new file mode 100644
index 000000000000..b4cef838bcd4
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/amlogic,a4-spifc.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2025 Amlogic, Inc. All rights reserved
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/amlogic,a4-spifc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPI flash controller for Amlogic ARM SoCs
+
+maintainers:
+ - Liang Yang <liang.yang@amlogic.com>
+ - Feng Chen <feng.chen@amlogic.com>
+ - Xianwei Zhao <xianwei.zhao@amlogic.com>
+
+description:
+ The Amlogic SPI flash controller is an extended version of the Amlogic NAND
+ flash controller. It supports SPI NOR flash and SPI NAND flash (where the
+ host ECC HW engine can be enabled).
+
+allOf:
+ - $ref: /schemas/spi/spi-controller.yaml#
+
+properties:
+ compatible:
+ const: amlogic,a4-spifc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: clock apb gate
+ - description: clock used for the controller
+
+ clock-names:
+ items:
+ - const: gate
+ - const: core
+
+ interrupts:
+ maxItems: 1
+
+ amlogic,rx-adj:
+ description:
+ Number of clock cycles by which sampling is delayed.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ default: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sfc0: spi@fe08d000 {
+ compatible = "amlogic,a4-spifc";
+ reg = <0xfe08d000 0x800>;
+ clocks = <&clkc_periphs 31>,
+ <&clkc_periphs 102>;
+ clock-names = "gate", "core";
+
+ pinctrl-0 = <&spiflash_default>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nand-ecc-engine = <&sfc0>;
+ nand-ecc-strength = <8>;
+ nand-ecc-step-size = <512>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml b/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml
index d29772994cf5..11885d0cc209 100644
--- a/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml
@@ -31,11 +31,16 @@ properties:
maxItems: 1
clock-names:
- contains:
- const: spi_clk
+ items:
+ - const: spi_clk
+ - const: spi_gclk
+ minItems: 1
clocks:
- maxItems: 1
+ items:
+ - description: Peripheral Bus clock
+ - description: Programmable Generic clock
+ minItems: 1
dmas:
items:
diff --git a/Documentation/devicetree/bindings/spi/atmel,quadspi.yaml b/Documentation/devicetree/bindings/spi/atmel,quadspi.yaml
index b0d99bc10535..30ab42c95c08 100644
--- a/Documentation/devicetree/bindings/spi/atmel,quadspi.yaml
+++ b/Documentation/devicetree/bindings/spi/atmel,quadspi.yaml
@@ -17,6 +17,9 @@ properties:
enum:
- atmel,sama5d2-qspi
- microchip,sam9x60-qspi
+ - microchip,sam9x7-ospi
+ - microchip,sama7d65-qspi
+ - microchip,sama7d65-ospi
- microchip,sama7g5-qspi
- microchip,sama7g5-ospi
diff --git a/Documentation/devicetree/bindings/spi/samsung,spi.yaml b/Documentation/devicetree/bindings/spi/samsung,spi.yaml
index fe298d47b1a9..1ce8b2770a4a 100644
--- a/Documentation/devicetree/bindings/spi/samsung,spi.yaml
+++ b/Documentation/devicetree/bindings/spi/samsung,spi.yaml
@@ -18,7 +18,6 @@ properties:
oneOf:
- enum:
- google,gs101-spi
- - samsung,s3c2443-spi # for S3C2443, S3C2416 and S3C2450
- samsung,s3c6410-spi
- samsung,s5pv210-spi # for S5PV210 and S5PC110
- samsung,exynos4210-spi
diff --git a/MAINTAINERS b/MAINTAINERS
index 31c218464710..2f7d96d778da 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1318,6 +1318,16 @@ S: Maintained
F: Documentation/devicetree/bindings/rtc/amlogic,a4-rtc.yaml
F: drivers/rtc/rtc-amlogic-a4.c
+AMLOGIC SPIFC DRIVER
+M: Liang Yang <liang.yang@amlogic.com>
+M: Feng Chen <feng.chen@amlogic.com>
+M: Xianwei Zhao <xianwei.zhao@amlogic.com>
+L: linux-amlogic@lists.infradead.org
+L: linux-spi@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/spi/amlogic,a4-spifc.yaml
+F: drivers/spi/spi-amlogic-spifc-a4.c
+
AMLOGIC SPISG DRIVER
M: Sunny Luo <sunny.luo@amlogic.com>
M: Xianwei Zhao <xianwei.zhao@amlogic.com>
@@ -25738,16 +25748,10 @@ W: https://github.com/srcres258/linux-doc
T: git https://github.com/srcres258/linux-doc.git doc-zh-tw
F: Documentation/translations/zh_TW/
-TRIGGER SOURCE - ADI UTIL SIGMA DELTA SPI
-M: David Lechner <dlechner@baylibre.com>
-S: Maintained
-F: Documentation/devicetree/bindings/trigger-source/adi,util-sigma-delta-spi.yaml
-
TRIGGER SOURCE
M: David Lechner <dlechner@baylibre.com>
S: Maintained
-F: Documentation/devicetree/bindings/trigger-source/gpio-trigger.yaml
-F: Documentation/devicetree/bindings/trigger-source/pwm-trigger.yaml
+F: Documentation/devicetree/bindings/trigger-source/*
TRUSTED SECURITY MODULE (TSM) INFRASTRUCTURE
M: Dan Williams <dan.j.williams@intel.com>
@@ -26897,6 +26901,13 @@ S: Maintained
F: include/uapi/linux/virtio_snd.h
F: sound/virtio/*
+VIRTIO SPI DRIVER
+M: Haixu Cui <quic_haixcui@quicinc.com>
+L: virtualization@lists.linux.dev
+S: Maintained
+F: drivers/spi/spi-virtio.c
+F: include/uapi/linux/virtio_spi.h
+
VIRTUAL BOX GUEST DEVICE DRIVER
M: Hans de Goede <hansg@kernel.org>
M: Arnd Bergmann <arnd@arndb.de>
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 891729c9c564..e8a39e304c7e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -99,6 +99,16 @@ config SPI_AMLOGIC_SPIFC_A1
This enables master mode support for the SPIFC (SPI flash
controller) available in Amlogic A1 (A113L SoC).
+config SPI_AMLOGIC_SPIFC_A4
+ tristate "Amlogic A4 SPI Flash controller"
+ depends on ARCH_MESON || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This enables SPI mode on the NAND Flash Controller of Amlogic
+ ARM SoCs. It supports SPI NOR flash and SPI NAND flash (the host
+ ECC HW engine can be enabled). The controller implements the
+ SPI-MEM interface; it doesn't support generic SPI.
+
config SPI_AMLOGIC_SPISG
tristate "Amlogic SPISG controller"
depends on COMMON_CLK
@@ -916,7 +926,8 @@ config SPI_ROCKCHIP_SFC
config SPI_RB4XX
tristate "Mikrotik RB4XX SPI master"
- depends on SPI_MASTER && ATH79
+ depends on SPI_MASTER && (ATH79 || COMPILE_TEST)
+ depends on OF
help
SPI controller driver for the Mikrotik RB4xx series boards.
@@ -1224,6 +1235,17 @@ config SPI_UNIPHIER
If your SoC supports SCSSI, say Y here.
+config SPI_VIRTIO
+ tristate "Virtio SPI Controller"
+ depends on SPI_MASTER && VIRTIO
+ help
+ If you say yes to this option, support will be included for the virtio
+ SPI controller driver. The hardware can be emulated by any device model
+ software according to the virtio protocol.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-virtio.
+
config SPI_XCOMM
tristate "Analog Devices AD-FMCOMMS1-EBZ SPI-I2C-bridge driver"
depends on I2C
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 062c85989c8c..8ff74a13faaa 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi-altera-platform.o
obj-$(CONFIG_SPI_ALTERA_CORE) += spi-altera-core.o
obj-$(CONFIG_SPI_ALTERA_DFL) += spi-altera-dfl.o
obj-$(CONFIG_SPI_AMLOGIC_SPIFC_A1) += spi-amlogic-spifc-a1.o
+obj-$(CONFIG_SPI_AMLOGIC_SPIFC_A4) += spi-amlogic-spifc-a4.o
obj-$(CONFIG_SPI_AMLOGIC_SPISG) += spi-amlogic-spisg.o
obj-$(CONFIG_SPI_APPLE) += spi-apple.o
obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o
@@ -158,6 +159,7 @@ spi-thunderx-objs := spi-cavium.o spi-cavium-thunderx.o
obj-$(CONFIG_SPI_THUNDERX) += spi-thunderx.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_UNIPHIER) += spi-uniphier.o
+obj-$(CONFIG_SPI_VIRTIO) += spi-virtio.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
obj-$(CONFIG_SPI_XLP) += spi-xlp.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 4e9bfd26aa80..d7a3d85d00c2 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -63,6 +63,7 @@
#define SAMA7G5_QSPI0_MAX_SPEED_HZ 200000000
#define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ 133000000
+#define SAM9X7_QSPI_MAX_SPEED_HZ 100000000
/* Bitfields in QSPI_CR (Control Register) */
#define QSPI_CR_QSPIEN BIT(0)
@@ -262,6 +263,9 @@ struct atmel_qspi_caps {
bool has_ricr;
bool octal;
bool has_dma;
+ bool has_2xgclk;
+ bool has_padcalib;
+ bool has_dllon;
};
struct atmel_qspi_ops;
@@ -1027,13 +1031,25 @@ static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq)
aq, QSPI_PCALCFG);
/* DLL On + start calibration. */
- atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);
+ if (aq->caps->has_dllon)
+ atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);
+ /* If there is no DLL support only start calibration. */
+ else
+ atmel_qspi_write(QSPI_CR_STPCAL, aq, QSPI_CR);
- /* Check synchronization status before updating configuration. */
- ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
- (val & QSPI_SR2_DLOCK) &&
- !(val & QSPI_SR2_CALBSY), 40,
- ATMEL_QSPI_TIMEOUT);
+ /*
+ * Check DLL clock lock and synchronization status before updating
+ * configuration.
+ */
+ if (aq->caps->has_dllon)
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ (val & QSPI_SR2_DLOCK) &&
+ !(val & QSPI_SR2_CALBSY), 40,
+ ATMEL_QSPI_TIMEOUT);
+ else
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_CALBSY), 40,
+ ATMEL_QSPI_TIMEOUT);
/* Refresh analogic blocks every 1 ms.*/
atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER,
@@ -1049,23 +1065,28 @@ static int atmel_qspi_set_gclk(struct atmel_qspi *aq)
int ret;
/* Disable DLL before setting GCLK */
- status = atmel_qspi_read(aq, QSPI_SR2);
- if (status & QSPI_SR2_DLOCK) {
- atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+ if (aq->caps->has_dllon) {
+ status = atmel_qspi_read(aq, QSPI_SR2);
+ if (status & QSPI_SR2_DLOCK) {
+ atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+ }
- ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
- !(val & QSPI_SR2_DLOCK), 40,
- ATMEL_QSPI_TIMEOUT);
- if (ret)
- return ret;
+ if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
+ atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
+ else
+ atmel_qspi_write(0, aq, QSPI_DLLCFG);
}
- if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
- atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
+ if (aq->caps->has_2xgclk)
+ ret = clk_set_rate(aq->gclk, 2 * aq->target_max_speed_hz);
else
- atmel_qspi_write(0, aq, QSPI_DLLCFG);
+ ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
- ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
if (ret) {
dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n");
return ret;
@@ -1088,11 +1109,16 @@ static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq)
if (ret)
return ret;
- if (aq->caps->octal) {
+ /*
+ * Check if the SoC supports pad calibration in Octal SPI mode.
+ * Proceed only if both the capabilities are true.
+ */
+ if (aq->caps->octal && aq->caps->has_padcalib) {
ret = atmel_qspi_set_pad_calibration(aq);
if (ret)
return ret;
- } else {
+ /* Turn the DLL on only if the SoC supports it */
+ } else if (aq->caps->has_dllon) {
atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR);
ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
(val & QSPI_SR2_DLOCK), 40,
@@ -1458,19 +1484,19 @@ static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq)
clk_disable_unprepare(aq->gclk);
- atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
- ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
- !(val & QSPI_SR2_DLOCK), 40,
- ATMEL_QSPI_TIMEOUT);
- if (ret)
- return ret;
-
- ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
- !(val & QSPI_SR2_CALBSY), 40,
- ATMEL_QSPI_TIMEOUT);
- if (ret)
- return ret;
+ if (aq->caps->has_dllon) {
+ atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+ ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_DLOCK), 40,
+ ATMEL_QSPI_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+ if (aq->caps->has_padcalib)
+ return readl_poll_timeout(aq->regs + QSPI_SR2, val,
+ !(val & QSPI_SR2_CALBSY), 40,
+ ATMEL_QSPI_TIMEOUT);
return 0;
}
@@ -1602,17 +1628,48 @@ static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
.has_ricr = true,
};
+static const struct atmel_qspi_caps atmel_sam9x7_ospi_caps = {
+ .max_speed_hz = SAM9X7_QSPI_MAX_SPEED_HZ,
+ .has_gclk = true,
+ .octal = true,
+ .has_dma = true,
+ .has_2xgclk = true,
+ .has_padcalib = false,
+ .has_dllon = false,
+};
+
+static const struct atmel_qspi_caps atmel_sama7d65_ospi_caps = {
+ .max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
+ .has_gclk = true,
+ .octal = true,
+ .has_dma = true,
+ .has_2xgclk = true,
+ .has_padcalib = true,
+ .has_dllon = false,
+};
+
+static const struct atmel_qspi_caps atmel_sama7d65_qspi_caps = {
+ .max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
+ .has_gclk = true,
+ .has_dma = true,
+ .has_2xgclk = true,
+ .has_dllon = false,
+};
+
static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = {
.max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
.has_gclk = true,
.octal = true,
.has_dma = true,
+ .has_padcalib = true,
+ .has_dllon = true,
};
static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = {
.max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
.has_gclk = true,
.has_dma = true,
+ .has_dllon = true,
};
static const struct of_device_id atmel_qspi_dt_ids[] = {
@@ -1632,6 +1689,19 @@ static const struct of_device_id atmel_qspi_dt_ids[] = {
.compatible = "microchip,sama7g5-qspi",
.data = &atmel_sama7g5_qspi_caps,
},
+ {
+ .compatible = "microchip,sam9x7-ospi",
+ .data = &atmel_sam9x7_ospi_caps,
+ },
+ {
+ .compatible = "microchip,sama7d65-ospi",
+ .data = &atmel_sama7d65_ospi_caps,
+ },
+ {
+ .compatible = "microchip,sama7d65-qspi",
+ .data = &atmel_sama7d65_qspi_caps,
+ },
+
{ /* sentinel */ }
};
diff --git a/drivers/spi/spi-altera-platform.c b/drivers/spi/spi-altera-platform.c
index 585393802e9f..e163774fd65b 100644
--- a/drivers/spi/spi-altera-platform.c
+++ b/drivers/spi/spi-altera-platform.c
@@ -30,7 +30,6 @@ static const struct regmap_config spi_altera_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
static int altera_spi_probe(struct platform_device *pdev)
diff --git a/drivers/spi/spi-amd-pci.c b/drivers/spi/spi-amd-pci.c
index e5faab414c17..d48c3a5da303 100644
--- a/drivers/spi/spi-amd-pci.c
+++ b/drivers/spi/spi-amd-pci.c
@@ -38,7 +38,7 @@ static int amd_spi_pci_probe(struct pci_dev *pdev,
/* Allocate storage for host and driver private data */
host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
if (!host)
- return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
+ return -ENOMEM;
amd_spi = spi_controller_get_devdata(host);
@@ -47,8 +47,7 @@ static int amd_spi_pci_probe(struct pci_dev *pdev,
amd_spi->io_remap_addr = devm_ioremap(dev, io_base_addr, AMD_HID2_MEM_SIZE);
if (!amd_spi->io_remap_addr)
- return dev_err_probe(dev, -ENOMEM,
- "ioremap of SPI registers failed\n");
+ return -ENOMEM;
dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 02e7fe095a0b..4d1dce4f4974 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -857,7 +857,7 @@ static int amd_spi_probe(struct platform_device *pdev)
/* Allocate storage for host and driver private data */
host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
if (!host)
- return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
+ return -ENOMEM;
amd_spi = spi_controller_get_devdata(host);
amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/spi/spi-amlogic-spifc-a4.c b/drivers/spi/spi-amlogic-spifc-a4.c
new file mode 100644
index 000000000000..4338d00e56a6
--- /dev/null
+++ b/drivers/spi/spi-amlogic-spifc-a4.c
@@ -0,0 +1,1222 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2025 Amlogic, Inc. All rights reserved
+ *
+ * Driver for the SPI Mode of Amlogic Flash Controller
+ * Authors:
+ * Liang Yang <liang.yang@amlogic.com>
+ * Feng Chen <feng.chen@amlogic.com>
+ * Xianwei Zhao <xianwei.zhao@amlogic.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/mtd/spinand.h>
+#include <linux/spi/spi-mem.h>
+
+#define SFC_CMD 0x00
+#define SFC_CFG 0x04
+#define SFC_DADR 0x08
+#define SFC_IADR 0x0c
+#define SFC_BUF 0x10
+#define SFC_INFO 0x14
+#define SFC_DC 0x18
+#define SFC_ADR 0x1c
+#define SFC_DL 0x20
+#define SFC_DH 0x24
+#define SFC_CADR 0x28
+#define SFC_SADR 0x2c
+#define SFC_RX_IDX 0x34
+#define SFC_RX_DAT 0x38
+#define SFC_SPI_CFG 0x40
+
+/* settings in SFC_CMD */
+
+/* 4 bits support 4 chip selects (active low, inactive high), but SPI mode supports only 2 */
+#define CHIP_SELECT_MASK GENMASK(13, 10)
+#define CS_NONE 0xf
+#define CS_0 0xe
+#define CS_1 0xd
+
+#define CLE (0x5 << 14)
+#define ALE (0x6 << 14)
+#define DWR (0x4 << 14)
+#define DRD (0x8 << 14)
+#define DUMMY (0xb << 14)
+#define IDLE (0xc << 14)
+#define IDLE_CYCLE_MASK GENMASK(9, 0)
+#define EXT_CYCLE_MASK GENMASK(9, 0)
+
+#define OP_M2N ((0 << 17) | (2 << 20))
+#define OP_N2M ((1 << 17) | (2 << 20))
+#define OP_STS ((3 << 17) | (2 << 20))
+#define OP_ADL ((0 << 16) | (3 << 20))
+#define OP_ADH ((1 << 16) | (3 << 20))
+#define OP_AIL ((2 << 16) | (3 << 20))
+#define OP_AIH ((3 << 16) | (3 << 20))
+#define OP_ASL ((4 << 16) | (3 << 20))
+#define OP_ASH ((5 << 16) | (3 << 20))
+#define OP_SEED ((8 << 16) | (3 << 20))
+#define SEED_MASK GENMASK(14, 0)
+#define ENABLE_RANDOM BIT(19)
+
+#define CMD_COMMAND(cs_sel, cmd) (CLE | ((cs_sel) << 10) | (cmd))
+#define CMD_ADDR(cs_sel, addr) (ALE | ((cs_sel) << 10) | (addr))
+#define CMD_DUMMY(cs_sel, cyc) (DUMMY | ((cs_sel) << 10) | ((cyc) & EXT_CYCLE_MASK))
+#define CMD_IDLE(cs_sel, cyc) (IDLE | ((cs_sel) << 10) | ((cyc) & IDLE_CYCLE_MASK))
+#define CMD_MEM2NAND(bch, pages) (OP_M2N | ((bch) << 14) | (pages))
+#define CMD_NAND2MEM(bch, pages) (OP_N2M | ((bch) << 14) | (pages))
+#define CMD_DATA_ADDRL(addr) (OP_ADL | ((addr) & 0xffff))
+#define CMD_DATA_ADDRH(addr) (OP_ADH | (((addr) >> 16) & 0xffff))
+#define CMD_INFO_ADDRL(addr) (OP_AIL | ((addr) & 0xffff))
+#define CMD_INFO_ADDRH(addr) (OP_AIH | (((addr) >> 16) & 0xffff))
+#define CMD_SEED(seed) (OP_SEED | ((seed) & SEED_MASK))
+
+#define GET_CMD_SIZE(x) (((x) >> 22) & GENMASK(4, 0))
+
+#define DEFAULT_PULLUP_CYCLE 2
+#define CS_SETUP_CYCLE 1
+#define CS_HOLD_CYCLE 2
+#define DEFAULT_BUS_CYCLE 4
+
+#define RAW_SIZE GENMASK(13, 0)
+#define RAW_SIZE_BW 14
+
+#define DMA_ADDR_ALIGN 8
+
+/* Bit fields in SFC_SPI_CFG */
+#define SPI_MODE_EN BIT(31)
+#define RAW_EXT_SIZE GENMASK(29, 18)
+#define ADDR_LANE GENMASK(17, 16)
+#define CPOL BIT(15)
+#define CPHA BIT(14)
+#define EN_HOLD BIT(13)
+#define EN_WP BIT(12)
+#define TXADJ GENMASK(11, 8)
+#define RXADJ GENMASK(7, 4)
+#define CMD_LANE GENMASK(3, 2)
+#define DATA_LANE GENMASK(1, 0)
+#define LANE_MAX 0x3
+
+/* raw ext size[25:14] + raw size[13:0] */
+#define RAW_MAX_RW_SIZE_MASK GENMASK(25, 0)
+
+/* Ecc fields */
+#define ECC_COMPLETE BIT(31)
+#define ECC_UNCORRECTABLE 0x3f
+#define ECC_ERR_CNT(x) (((x) >> 24) & 0x3f)
+#define ECC_ZERO_CNT(x) (((x) >> 16) & 0x3f)
+
+#define ECC_BCH8_512 1
+#define ECC_BCH8_1K 2
+#define ECC_BCH8_PARITY_BYTES 14
+#define ECC_BCH8_USER_BYTES 2
+#define ECC_BCH8_INFO_BYTES (ECC_BCH8_USER_BYTES + ECC_BCH8_PARITY_BYTES)
+#define ECC_BCH8_STRENGTH 8
+#define ECC_BCH8_DEFAULT_STEP 512
+#define ECC_DEFAULT_BCH_MODE ECC_BCH8_512
+#define ECC_PER_INFO_BYTE 8
+#define ECC_PATTERN 0x5a
+#define ECC_BCH_MAX_SECT_SIZE 63
+/* soft flags for sfc */
+#define SFC_HWECC BIT(0)
+#define SFC_DATA_RANDOM BIT(1)
+#define SFC_DATA_ONLY BIT(2)
+#define SFC_OOB_ONLY BIT(3)
+#define SFC_DATA_OOB BIT(4)
+#define SFC_AUTO_OOB BIT(5)
+#define SFC_RAW_RW BIT(6)
+#define SFC_XFER_MDOE_MASK GENMASK(6, 2)
+
+#define SFC_DATABUF_SIZE 8192
+#define SFC_INFOBUF_SIZE 256
+#define SFC_BUF_SIZE (SFC_DATABUF_SIZE + SFC_INFOBUF_SIZE)
+
+/* !!! PCB and SPI-NAND chip limitations */
+#define SFC_MAX_FREQUENCY (250 * 1000 * 1000)
+#define SFC_MIN_FREQUENCY (4 * 1000 * 1000)
+#define SFC_BUS_DEFAULT_CLK 40000000
+#define SFC_MAX_CS_NUM 2
+
+/* SPI-FLASH R/W operation cmd */
+#define SPIFLASH_RD_OCTALIO 0xcb
+#define SPIFLASH_RD_OCTAL 0x8b
+#define SPIFLASH_RD_QUADIO 0xeb
+#define SPIFLASH_RD_QUAD 0x6b
+#define SPIFLASH_RD_DUALIO 0xbb
+#define SPIFLASH_RD_DUAL 0x3b
+#define SPIFLASH_RD_FAST 0x0b
+#define SPIFLASH_RD 0x03
+#define SPIFLASH_WR_OCTALIO 0xC2
+#define SPIFLASH_WR_OCTAL 0x82
+#define SPIFLASH_WR_QUAD 0x32
+#define SPIFLASH_WR 0x02
+#define SPIFLASH_UP_QUAD 0x34
+#define SPIFLASH_UP 0x84
+
+struct aml_sfc_ecc_cfg {
+ u32 stepsize;
+ u32 nsteps;
+ u32 strength;
+ u32 oobsize;
+ u32 bch;
+};
+
+struct aml_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct aml_sfc_caps {
+ struct aml_sfc_ecc_cfg *ecc_caps;
+ u32 num_ecc_caps;
+};
+
+struct aml_sfc {
+ struct device *dev;
+ struct clk *gate_clk;
+ struct clk *core_clk;
+ struct spi_controller *ctrl;
+ struct regmap *regmap_base;
+ const struct aml_sfc_caps *caps;
+ struct nand_ecc_engine ecc_eng;
+ struct aml_ecc_stats ecc_stats;
+ dma_addr_t daddr;
+ dma_addr_t iaddr;
+ u32 info_bytes;
+ u32 bus_rate;
+ u32 flags;
+ u32 rx_adj;
+ u32 cs_sel;
+ u8 *data_buf;
+ __le64 *info_buf;
+ u8 *priv;
+};
+
+#define AML_ECC_DATA(sz, s, b) { .stepsize = (sz), .strength = (s), .bch = (b) }
+
+static struct aml_sfc_ecc_cfg aml_a113l2_ecc_caps[] = {
+ AML_ECC_DATA(512, 8, ECC_BCH8_512),
+ AML_ECC_DATA(1024, 8, ECC_BCH8_1K),
+};
+
+static const struct aml_sfc_caps aml_a113l2_sfc_caps = {
+ .ecc_caps = aml_a113l2_ecc_caps,
+ .num_ecc_caps = ARRAY_SIZE(aml_a113l2_ecc_caps)
+};
+
+static struct aml_sfc *nand_to_aml_sfc(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+
+ return container_of(eng, struct aml_sfc, ecc_eng);
+}
+
+static inline void *aml_sfc_to_ecc_ctx(struct aml_sfc *sfc)
+{
+ return sfc->priv;
+}
+
+static int aml_sfc_wait_cmd_finish(struct aml_sfc *sfc, u64 timeout_ms)
+{
+ u32 cmd_size = 0;
+ int ret;
+
+ /*
+ * The SPINAND flash controller employs a two-stage pipeline:
+ * 1) command prefetch; 2) command execution.
+ *
+ * All commands are stored in the FIFO, with one prefetched for execution.
+ *
+ * There are cases where the FIFO is detected as empty, yet a command may
+ * still be in execution and a prefetched command pending execution.
+ *
+ * So, send two idle commands to ensure all previous commands have
+ * been executed.
+ */
+ regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0));
+ regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0));
+
+ /* Wait for the FIFO to empty. */
+ ret = regmap_read_poll_timeout(sfc->regmap_base, SFC_CMD, cmd_size,
+ !GET_CMD_SIZE(cmd_size),
+ 10, timeout_ms * 1000);
+ if (ret)
+ dev_err(sfc->dev, "wait for empty CMD FIFO time out\n");
+
+ return ret;
+}
+
+static int aml_sfc_pre_transfer(struct aml_sfc *sfc, u32 idle_cycle, u32 cs2clk_cycle)
+{
+ int ret;
+
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(CS_NONE, idle_cycle));
+ if (ret)
+ return ret;
+
+ return regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, cs2clk_cycle));
+}
+
+static int aml_sfc_end_transfer(struct aml_sfc *sfc, u32 clk2cs_cycle)
+{
+ int ret;
+
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, clk2cs_cycle));
+ if (ret)
+ return ret;
+
+ return aml_sfc_wait_cmd_finish(sfc, 0);
+}
+
+static int aml_sfc_set_bus_width(struct aml_sfc *sfc, u8 buswidth, u32 mask)
+{
+ int i;
+ u32 conf = 0;
+
+ for (i = 0; i <= LANE_MAX; i++) {
+ if (buswidth == 1 << i) {
+ conf = i << __bf_shf(mask);
+ return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG,
+ mask, conf);
+ }
+ }
+
+ return 0;
+}
+
+static int aml_sfc_send_cmd(struct aml_sfc *sfc, const struct spi_mem_op *op)
+{
+ int i, ret;
+ u8 val;
+
+ ret = aml_sfc_set_bus_width(sfc, op->cmd.buswidth, CMD_LANE);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < op->cmd.nbytes; i++) {
+ val = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_COMMAND(sfc->cs_sel, val));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aml_sfc_send_addr(struct aml_sfc *sfc, const struct spi_mem_op *op)
+{
+ int i, ret;
+ u8 val;
+
+ ret = aml_sfc_set_bus_width(sfc, op->addr.buswidth, ADDR_LANE);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < op->addr.nbytes; i++) {
+ val = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
+
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_ADDR(sfc->cs_sel, val));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool aml_sfc_is_xio_op(const struct spi_mem_op *op)
+{
+ switch (op->cmd.opcode) {
+ case SPIFLASH_RD_OCTALIO:
+ case SPIFLASH_RD_QUADIO:
+ case SPIFLASH_RD_DUALIO:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int aml_sfc_send_cmd_addr_dummy(struct aml_sfc *sfc, const struct spi_mem_op *op)
+{
+ u32 dummy_cycle, cmd;
+ int ret;
+
+ ret = aml_sfc_send_cmd(sfc, op);
+ if (ret)
+ return ret;
+
+ ret = aml_sfc_send_addr(sfc, op);
+ if (ret)
+ return ret;
+
+ if (op->dummy.nbytes) {
+ /* Dummy buswidth configuration is not supported */
+ if (aml_sfc_is_xio_op(op))
+ dummy_cycle = op->dummy.nbytes * 8 / op->data.buswidth;
+ else
+ dummy_cycle = op->dummy.nbytes * 8;
+ cmd = CMD_DUMMY(sfc->cs_sel, dummy_cycle - 1);
+ return regmap_write(sfc->regmap_base, SFC_CMD, cmd);
+ }
+
+ return 0;
+}
+
+static bool aml_sfc_is_snand_hwecc_page_op(struct aml_sfc *sfc, const struct spi_mem_op *op)
+{
+ switch (op->cmd.opcode) {
+ /* SPINAND read from cache cmd */
+ case SPIFLASH_RD_QUADIO:
+ case SPIFLASH_RD_QUAD:
+ case SPIFLASH_RD_DUALIO:
+ case SPIFLASH_RD_DUAL:
+ case SPIFLASH_RD_FAST:
+ case SPIFLASH_RD:
+ /* SPINAND write to cache cmd */
+ case SPIFLASH_WR_QUAD:
+ case SPIFLASH_WR:
+ case SPIFLASH_UP_QUAD:
+ case SPIFLASH_UP:
+ if (sfc->flags & SFC_HWECC)
+ return true;
+ else
+ return false;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int aml_sfc_dma_buffer_setup(struct aml_sfc *sfc, void *databuf,
+ int datalen, void *infobuf, int infolen,
+ enum dma_data_direction dir)
+{
+ u32 cmd = 0;
+ int ret;
+
+ sfc->daddr = dma_map_single(sfc->dev, databuf, datalen, dir);
+ ret = dma_mapping_error(sfc->dev, sfc->daddr);
+ if (ret) {
+ dev_err(sfc->dev, "DMA mapping error\n");
+ goto out_map_data;
+ }
+
+ cmd = CMD_DATA_ADDRL(sfc->daddr);
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
+ if (ret)
+ goto out_map_data;
+
+ cmd = CMD_DATA_ADDRH(sfc->daddr);
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
+ if (ret)
+ goto out_map_data;
+
+ if (infobuf) {
+ sfc->iaddr = dma_map_single(sfc->dev, infobuf, infolen, dir);
+ ret = dma_mapping_error(sfc->dev, sfc->iaddr);
+ if (ret) {
+ dev_err(sfc->dev, "DMA mapping error\n");
+ dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);
+ goto out_map_data;
+ }
+
+ sfc->info_bytes = infolen;
+ cmd = CMD_INFO_ADDRL(sfc->iaddr);
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
+ if (ret)
+ goto out_map_info;
+
+ cmd = CMD_INFO_ADDRH(sfc->iaddr);
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
+ if (ret)
+ goto out_map_info;
+ }
+
+ return 0;
+
+out_map_info:
+ dma_unmap_single(sfc->dev, sfc->iaddr, datalen, dir);
+out_map_data:
+ dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);
+
+ return ret;
+}
+
+static void aml_sfc_dma_buffer_release(struct aml_sfc *sfc,
+ int datalen, int infolen,
+ enum dma_data_direction dir)
+{
+ dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);
+ if (infolen) {
+ dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir);
+ sfc->info_bytes = 0;
+ }
+}
+
+static bool aml_sfc_dma_buffer_is_safe(const void *buffer)
+{
+ if ((uintptr_t)buffer % DMA_ADDR_ALIGN)
+ return false;
+
+ if (virt_addr_valid(buffer))
+ return true;
+
+ return false;
+}
+
+static void *aml_get_dma_safe_input_buf(const struct spi_mem_op *op)
+{
+ if (aml_sfc_dma_buffer_is_safe(op->data.buf.in))
+ return op->data.buf.in;
+
+ return kzalloc(op->data.nbytes, GFP_KERNEL);
+}
+
+static void aml_sfc_put_dma_safe_input_buf(const struct spi_mem_op *op, void *buf)
+{
+ if (WARN_ON(op->data.dir != SPI_MEM_DATA_IN) || WARN_ON(!buf))
+ return;
+
+ if (buf == op->data.buf.in)
+ return;
+
+ memcpy(op->data.buf.in, buf, op->data.nbytes);
+ kfree(buf);
+}
+
+static void *aml_sfc_get_dma_safe_output_buf(const struct spi_mem_op *op)
+{
+ if (aml_sfc_dma_buffer_is_safe(op->data.buf.out))
+ return (void *)op->data.buf.out;
+
+ return kmemdup(op->data.buf.out, op->data.nbytes, GFP_KERNEL);
+}
+
+static void aml_sfc_put_dma_safe_output_buf(const struct spi_mem_op *op, const void *buf)
+{
+ if (WARN_ON(op->data.dir != SPI_MEM_DATA_OUT) || WARN_ON(!buf))
+ return;
+
+ if (buf != op->data.buf.out)
+ kfree(buf);
+}
+
+static u64 aml_sfc_cal_timeout_cycle(struct aml_sfc *sfc, const struct spi_mem_op *op)
+{
+ u64 ms;
+
+ /* For each byte we wait for (8 cycles / buswidth) of the SPI clock. */
+ ms = 8 * MSEC_PER_SEC * op->data.nbytes / op->data.buswidth;
+ do_div(ms, sfc->bus_rate / DEFAULT_BUS_CYCLE);
+
+ /*
+ * Double the value and add a 200 ms tolerance to compensate for
+ * the impact of specific CS hold time, CS setup time sequences,
+ * controller burst gaps, and other related timing variations.
+ */
+ ms += ms + 200;
+
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
+ return ms;
+}
+
+static void aml_sfc_check_ecc_pages_valid(struct aml_sfc *sfc, bool raw)
+{
+ struct aml_sfc_ecc_cfg *ecc_cfg;
+ __le64 *info;
+ int ret;
+
+ info = sfc->info_buf;
+ ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
+ info += raw ? 0 : ecc_cfg->nsteps - 1;
+
+ do {
+ usleep_range(10, 15);
+ /* info is updated by nfc dma engine*/
+ smp_rmb();
+ dma_sync_single_for_cpu(sfc->dev, sfc->iaddr, sfc->info_bytes,
+ DMA_FROM_DEVICE);
+ ret = le64_to_cpu(*info) & ECC_COMPLETE;
+ } while (!ret);
+}
+
+static int aml_sfc_raw_io_op(struct aml_sfc *sfc, const struct spi_mem_op *op)
+{
+ void *buf = NULL;
+ int ret;
+ bool is_datain = false;
+ u32 cmd = 0, conf;
+ u64 timeout_ms;
+
+ if (!op->data.nbytes)
+ goto end_xfer;
+
+ conf = (op->data.nbytes >> RAW_SIZE_BW) << __bf_shf(RAW_EXT_SIZE);
+ ret = regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, RAW_EXT_SIZE, conf);
+ if (ret)
+ goto err_out;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ is_datain = true;
+
+ buf = aml_get_dma_safe_input_buf(op);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ cmd |= CMD_NAND2MEM(0, (op->data.nbytes & RAW_SIZE));
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ is_datain = false;
+
+ buf = aml_sfc_get_dma_safe_output_buf(op);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ cmd |= CMD_MEM2NAND(0, (op->data.nbytes & RAW_SIZE));
+ } else {
+ goto end_xfer;
+ }
+
+ ret = aml_sfc_dma_buffer_setup(sfc, buf, op->data.nbytes,
+ is_datain ? sfc->info_buf : NULL,
+ is_datain ? ECC_PER_INFO_BYTE : 0,
+ is_datain ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ if (ret)
+ goto err_out;
+
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
+ if (ret)
+ goto err_out;
+
+ timeout_ms = aml_sfc_cal_timeout_cycle(sfc, op);
+ ret = aml_sfc_wait_cmd_finish(sfc, timeout_ms);
+ if (ret)
+ goto err_out;
+
+ if (is_datain)
+ aml_sfc_check_ecc_pages_valid(sfc, 1);
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ aml_sfc_put_dma_safe_input_buf(op, buf);
+ else if (op->data.dir == SPI_MEM_DATA_OUT)
+ aml_sfc_put_dma_safe_output_buf(op, buf);
+
+ aml_sfc_dma_buffer_release(sfc, op->data.nbytes,
+ is_datain ? ECC_PER_INFO_BYTE : 0,
+ is_datain ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+
+end_xfer:
+ return aml_sfc_end_transfer(sfc, CS_HOLD_CYCLE);
+
+err_out:
+ return ret;
+}
+
+static void aml_sfc_set_user_byte(struct aml_sfc *sfc, __le64 *info_buf, u8 *oob_buf, bool auto_oob)
+{
+ struct aml_sfc_ecc_cfg *ecc_cfg;
+ __le64 *info;
+ int i, count, step_size;
+
+ ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
+
+ step_size = auto_oob ? ECC_BCH8_INFO_BYTES : ECC_BCH8_USER_BYTES;
+
+ for (i = 0, count = 0; i < ecc_cfg->nsteps; i++, count += step_size) {
+ info = &info_buf[i];
+ *info &= cpu_to_le64(~0xffff);
+ *info |= cpu_to_le64((oob_buf[count + 1] << 8) + oob_buf[count]);
+ }
+}
+
+static void aml_sfc_get_user_byte(struct aml_sfc *sfc, __le64 *info_buf, u8 *oob_buf)
+{
+ struct aml_sfc_ecc_cfg *ecc_cfg;
+ __le64 *info;
+ int i, count;
+
+ ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
+
+ for (i = 0, count = 0; i < ecc_cfg->nsteps; i++, count += ECC_BCH8_INFO_BYTES) {
+ info = &info_buf[i];
+ oob_buf[count] = le64_to_cpu(*info);
+ oob_buf[count + 1] = le64_to_cpu(*info) >> 8;
+ }
+}
+
+static int aml_sfc_check_hwecc_status(struct aml_sfc *sfc, __le64 *info_buf)
+{
+ struct aml_sfc_ecc_cfg *ecc_cfg;
+ __le64 *info;
+ u32 i, max_bitflips = 0, per_sector_bitflips = 0;
+
+ ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
+
+ sfc->ecc_stats.failed = 0;
+ sfc->ecc_stats.bitflips = 0;
+ sfc->ecc_stats.corrected = 0;
+
+ for (i = 0, info = info_buf; i < ecc_cfg->nsteps; i++, info++) {
+ if (ECC_ERR_CNT(le64_to_cpu(*info)) != ECC_UNCORRECTABLE) {
+ per_sector_bitflips = ECC_ERR_CNT(le64_to_cpu(*info));
+ max_bitflips = max_t(u32, max_bitflips, per_sector_bitflips);
+ sfc->ecc_stats.corrected += per_sector_bitflips;
+ continue;
+ }
+
+ return -EBADMSG;
+ }
+
+ return max_bitflips;
+}
+
+static int aml_sfc_read_page_hwecc(struct aml_sfc *sfc, const struct spi_mem_op *op)
+{
+ struct aml_sfc_ecc_cfg *ecc_cfg;
+ int ret, data_len, info_len;
+ u32 page_size, cmd = 0;
+ u64 timeout_ms;
+
+ ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
+
+ page_size = ecc_cfg->stepsize * ecc_cfg->nsteps;
+ data_len = page_size + ecc_cfg->oobsize;
+ info_len = ecc_cfg->nsteps * ECC_PER_INFO_BYTE;
+
+ ret = aml_sfc_dma_buffer_setup(sfc, sfc->data_buf, data_len,
+ sfc->info_buf, info_len, DMA_FROM_DEVICE);
+ if (ret)
+ goto err_out;
+
+ cmd |= CMD_NAND2MEM(ecc_cfg->bch, ecc_cfg->nsteps);
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
+ if (ret)
+ goto err_out;
+
+ timeout_ms = aml_sfc_cal_timeout_cycle(sfc, op);
+ ret = aml_sfc_wait_cmd_finish(sfc, timeout_ms);
+ if (ret)
+ goto err_out;
+
+ aml_sfc_check_ecc_pages_valid(sfc, 0);
+ aml_sfc_dma_buffer_release(sfc, data_len, info_len, DMA_FROM_DEVICE);
+
+ /* check ecc status here */
+ ret = aml_sfc_check_hwecc_status(sfc, sfc->info_buf);
+ if (ret < 0)
+ sfc->ecc_stats.failed++;
+ else
+ sfc->ecc_stats.bitflips = ret;
+
+ if (sfc->flags & SFC_DATA_ONLY) {
+ memcpy(op->data.buf.in, sfc->data_buf, page_size);
+ } else if (sfc->flags & SFC_OOB_ONLY) {
+ aml_sfc_get_user_byte(sfc, sfc->info_buf, op->data.buf.in);
+ } else if (sfc->flags & SFC_DATA_OOB) {
+ memcpy(op->data.buf.in, sfc->data_buf, page_size);
+ aml_sfc_get_user_byte(sfc, sfc->info_buf, op->data.buf.in + page_size);
+ }
+
+ return aml_sfc_end_transfer(sfc, CS_HOLD_CYCLE);
+
+err_out:
+ return ret;
+}
+
+static int aml_sfc_write_page_hwecc(struct aml_sfc *sfc, const struct spi_mem_op *op)
+{
+ struct aml_sfc_ecc_cfg *ecc_cfg;
+ int ret, data_len, info_len;
+ u32 page_size, cmd = 0;
+ u64 timeout_ms;
+
+ ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
+
+ page_size = ecc_cfg->stepsize * ecc_cfg->nsteps;
+ data_len = page_size + ecc_cfg->oobsize;
+ info_len = ecc_cfg->nsteps * ECC_PER_INFO_BYTE;
+
+ memset(sfc->info_buf, ECC_PATTERN, ecc_cfg->oobsize);
+ memcpy(sfc->data_buf, op->data.buf.out, page_size);
+
+ if (!(sfc->flags & SFC_DATA_ONLY)) {
+ if (sfc->flags & SFC_AUTO_OOB)
+ aml_sfc_set_user_byte(sfc, sfc->info_buf,
+ (u8 *)op->data.buf.out + page_size, 1);
+ else
+ aml_sfc_set_user_byte(sfc, sfc->info_buf,
+ (u8 *)op->data.buf.out + page_size, 0);
+ }
+
+ ret = aml_sfc_dma_buffer_setup(sfc, sfc->data_buf, data_len,
+ sfc->info_buf, info_len, DMA_TO_DEVICE);
+ if (ret)
+ goto err_out;
+
+ cmd |= CMD_MEM2NAND(ecc_cfg->bch, ecc_cfg->nsteps);
+ ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd);
+ if (ret)
+ goto err_out;
+
+ timeout_ms = aml_sfc_cal_timeout_cycle(sfc, op);
+
+ ret = aml_sfc_wait_cmd_finish(sfc, timeout_ms);
+ if (ret)
+ goto err_out;
+
+ aml_sfc_dma_buffer_release(sfc, data_len, info_len, DMA_TO_DEVICE);
+
+ return aml_sfc_end_transfer(sfc, CS_HOLD_CYCLE);
+
+err_out:
+ return ret;
+}
+
+static int aml_sfc_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct aml_sfc *sfc;
+ struct spi_device *spi;
+ struct aml_sfc_ecc_cfg *ecc_cfg;
+ int ret;
+
+ sfc = spi_controller_get_devdata(mem->spi->controller);
+ ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
+ spi = mem->spi;
+ sfc->cs_sel = spi->chip_select[0] ? CS_1 : CS_0;
+
+ dev_dbg(sfc->dev, "cmd:0x%02x - addr:%08llX@%d:%u - dummy:%d:%u - data:%d:%u",
+ op->cmd.opcode, op->addr.val, op->addr.buswidth, op->addr.nbytes,
+ op->dummy.buswidth, op->dummy.nbytes, op->data.buswidth, op->data.nbytes);
+
+ ret = aml_sfc_pre_transfer(sfc, DEFAULT_PULLUP_CYCLE, CS_SETUP_CYCLE);
+ if (ret)
+ return ret;
+
+ ret = aml_sfc_send_cmd_addr_dummy(sfc, op);
+ if (ret)
+ return ret;
+
+ ret = aml_sfc_set_bus_width(sfc, op->data.buswidth, DATA_LANE);
+ if (ret)
+ return ret;
+
+ if (aml_sfc_is_snand_hwecc_page_op(sfc, op) &&
+ ecc_cfg && !(sfc->flags & SFC_RAW_RW)) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ return aml_sfc_read_page_hwecc(sfc, op);
+ else
+ return aml_sfc_write_page_hwecc(sfc, op);
+ }
+
+ return aml_sfc_raw_io_op(sfc, op);
+}
+
+static int aml_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct aml_sfc *sfc;
+ struct aml_sfc_ecc_cfg *ecc_cfg;
+
+ sfc = spi_controller_get_devdata(mem->spi->controller);
+ ecc_cfg = aml_sfc_to_ecc_ctx(sfc);
+
+ if (aml_sfc_is_snand_hwecc_page_op(sfc, op) && ecc_cfg) {
+ if (op->data.nbytes > ecc_cfg->stepsize * ECC_BCH_MAX_SECT_SIZE)
+ return -EOPNOTSUPP;
+ } else if (op->data.nbytes & ~RAW_MAX_RW_SIZE_MASK) {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops aml_sfc_mem_ops = {
+ .adjust_op_size = aml_sfc_adjust_op_size,
+ .exec_op = aml_sfc_exec_op,
+};
+
+static int aml_sfc_layout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+
+ if (section >= nand->ecc.ctx.nsteps)
+ return -ERANGE;
+
+ oobregion->offset = ECC_BCH8_USER_BYTES + (section * ECC_BCH8_INFO_BYTES);
+ oobregion->length = ECC_BCH8_PARITY_BYTES;
+
+ return 0;
+}
+
+static int aml_sfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+
+ if (section >= nand->ecc.ctx.nsteps)
+ return -ERANGE;
+
+ oobregion->offset = section * ECC_BCH8_INFO_BYTES;
+ oobregion->length = ECC_BCH8_USER_BYTES;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops aml_sfc_ooblayout_ops = {
+ .ecc = aml_sfc_layout_ecc,
+ .free = aml_sfc_ooblayout_free,
+};
+
+static int aml_spi_settings(struct aml_sfc *sfc, struct spi_device *spi)
+{
+ u32 conf = 0;
+
+ if (spi->mode & SPI_CPHA)
+ conf |= CPHA;
+
+ if (spi->mode & SPI_CPOL)
+ conf |= CPOL;
+
+ conf |= FIELD_PREP(RXADJ, sfc->rx_adj);
+ conf |= EN_HOLD | EN_WP;
+ return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG,
+ CPHA | CPOL | RXADJ |
+ EN_HOLD | EN_WP, conf);
+}
+
+static int aml_set_spi_clk(struct aml_sfc *sfc, struct spi_device *spi)
+{
+ u32 speed_hz;
+ int ret;
+
+ if (spi->max_speed_hz > SFC_MAX_FREQUENCY)
+ speed_hz = SFC_MAX_FREQUENCY;
+ else if (!spi->max_speed_hz)
+ speed_hz = SFC_BUS_DEFAULT_CLK;
+ else if (spi->max_speed_hz < SFC_MIN_FREQUENCY)
+ speed_hz = SFC_MIN_FREQUENCY;
+ else
+ speed_hz = spi->max_speed_hz;
+
+ /* The SPI clock is generated by dividing the bus clock by four by default. */
+ ret = regmap_write(sfc->regmap_base, SFC_CFG, (DEFAULT_BUS_CYCLE - 1));
+ if (ret) {
+ dev_err(sfc->dev, "failed to set bus cycle\n");
+ return ret;
+ }
+
+ return clk_set_rate(sfc->core_clk, speed_hz * DEFAULT_BUS_CYCLE);
+}
+
+static int aml_sfc_setup(struct spi_device *spi)
+{
+ struct aml_sfc *sfc;
+ int ret;
+
+ sfc = spi_controller_get_devdata(spi->controller);
+ ret = aml_spi_settings(sfc, spi);
+ if (ret)
+ return ret;
+
+ ret = aml_set_spi_clk(sfc, spi);
+ if (ret)
+ return ret;
+
+ sfc->bus_rate = clk_get_rate(sfc->core_clk);
+
+ return 0;
+}
+
+static int aml_sfc_ecc_init_ctx(struct nand_device *nand)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct aml_sfc *sfc = nand_to_aml_sfc(nand);
+ struct aml_sfc_ecc_cfg *ecc_cfg;
+ const struct aml_sfc_caps *caps = sfc->caps;
+ struct aml_sfc_ecc_cfg *ecc_caps = caps->ecc_caps;
+ int i, ecc_strength, ecc_step_size;
+
+ ecc_step_size = nand->ecc.user_conf.step_size;
+ ecc_strength = nand->ecc.user_conf.strength;
+
+ for (i = 0; i < caps->num_ecc_caps; i++) {
+ if (ecc_caps[i].stepsize == ecc_step_size) {
+ nand->ecc.ctx.conf.step_size = ecc_step_size;
+ nand->ecc.ctx.conf.flags |= BIT(ecc_caps[i].bch);
+ }
+
+ if (ecc_caps[i].strength == ecc_strength)
+ nand->ecc.ctx.conf.strength = ecc_strength;
+ }
+
+ if (!nand->ecc.ctx.conf.step_size) {
+ nand->ecc.ctx.conf.step_size = ECC_BCH8_DEFAULT_STEP;
+ nand->ecc.ctx.conf.flags |= BIT(ECC_DEFAULT_BCH_MODE);
+ }
+
+ if (!nand->ecc.ctx.conf.strength)
+ nand->ecc.ctx.conf.strength = ECC_BCH8_STRENGTH;
+
+ nand->ecc.ctx.nsteps = nand->memorg.pagesize / nand->ecc.ctx.conf.step_size;
+ nand->ecc.ctx.total = nand->ecc.ctx.nsteps * ECC_BCH8_PARITY_BYTES;
+
+ /* Verify the page size and OOB size against the SFC requirements. */
+ if ((nand->memorg.pagesize % nand->ecc.ctx.conf.step_size) ||
+ (nand->memorg.oobsize < (nand->ecc.ctx.total +
+ nand->ecc.ctx.nsteps * ECC_BCH8_USER_BYTES)))
+ return -EOPNOTSUPP;
+
+ nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
+ if (!ecc_cfg)
+ return -ENOMEM;
+
+ ecc_cfg->stepsize = nand->ecc.ctx.conf.step_size;
+ ecc_cfg->nsteps = nand->ecc.ctx.nsteps;
+ ecc_cfg->strength = nand->ecc.ctx.conf.strength;
+ ecc_cfg->oobsize = nand->memorg.oobsize;
+ ecc_cfg->bch = nand->ecc.ctx.conf.flags & BIT(ECC_DEFAULT_BCH_MODE) ? 1 : 2;
+
+ nand->ecc.ctx.priv = ecc_cfg;
+ sfc->priv = (void *)ecc_cfg;
+ mtd_set_ooblayout(mtd, &aml_sfc_ooblayout_ops);
+
+ sfc->flags |= SFC_HWECC;
+
+ return 0;
+}
+
+static void aml_sfc_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ struct aml_sfc *sfc = nand_to_aml_sfc(nand);
+
+ sfc->flags &= ~(SFC_HWECC);
+ kfree(nand->ecc.ctx.priv);
+ sfc->priv = NULL;
+}
+
+static int aml_sfc_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct aml_sfc *sfc = nand_to_aml_sfc(nand);
+ struct spinand_device *spinand = nand_to_spinand(nand);
+
+ sfc->flags &= ~SFC_XFER_MDOE_MASK;
+
+ if (req->datalen && !req->ooblen)
+ sfc->flags |= SFC_DATA_ONLY;
+ else if (!req->datalen && req->ooblen)
+ sfc->flags |= SFC_OOB_ONLY;
+ else if (req->datalen && req->ooblen)
+ sfc->flags |= SFC_DATA_OOB;
+
+ if (req->mode == MTD_OPS_RAW)
+ sfc->flags |= SFC_RAW_RW;
+ else if (req->mode == MTD_OPS_AUTO_OOB)
+ sfc->flags |= SFC_AUTO_OOB;
+
+ memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
+
+ return 0;
+}
+
+static int aml_sfc_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct aml_sfc *sfc = nand_to_aml_sfc(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ if (req->mode == MTD_OPS_RAW || req->type == NAND_PAGE_WRITE)
+ return 0;
+
+ if (sfc->ecc_stats.failed)
+ mtd->ecc_stats.failed++;
+
+ mtd->ecc_stats.corrected += sfc->ecc_stats.corrected;
+
+ return sfc->ecc_stats.failed ? -EBADMSG : sfc->ecc_stats.bitflips;
+}
+
+static const struct spi_controller_mem_caps aml_sfc_mem_caps = {
+ .ecc = true,
+};
+
+static const struct nand_ecc_engine_ops aml_sfc_ecc_engine_ops = {
+ .init_ctx = aml_sfc_ecc_init_ctx,
+ .cleanup_ctx = aml_sfc_ecc_cleanup_ctx,
+ .prepare_io_req = aml_sfc_ecc_prepare_io_req,
+ .finish_io_req = aml_sfc_ecc_finish_io_req,
+};
+
+static int aml_sfc_clk_init(struct aml_sfc *sfc)
+{
+ sfc->gate_clk = devm_clk_get_enabled(sfc->dev, "gate");
+ if (IS_ERR(sfc->gate_clk)) {
+ dev_err(sfc->dev, "unable to enable gate clk\n");
+ return PTR_ERR(sfc->gate_clk);
+ }
+
+ sfc->core_clk = devm_clk_get_enabled(sfc->dev, "core");
+ if (IS_ERR(sfc->core_clk)) {
+ dev_err(sfc->dev, "unable to enable core clk\n");
+ return PTR_ERR(sfc->core_clk);
+ }
+
+ return clk_set_rate(sfc->core_clk, SFC_BUS_DEFAULT_CLK);
+}
+
+static int aml_sfc_disable_clk(struct aml_sfc *sfc)
+{
+ clk_disable_unprepare(sfc->core_clk);
+ clk_disable_unprepare(sfc->gate_clk);
+
+ return 0;
+}
+
+static int aml_sfc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct aml_sfc *sfc;
+ void __iomem *reg_base;
+ int ret;
+ u32 val = 0;
+
+ const struct regmap_config core_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SFC_SPI_CFG,
+ };
+
+ ctrl = devm_spi_alloc_host(dev, sizeof(*sfc));
+ if (!ctrl)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, ctrl);
+
+ sfc = spi_controller_get_devdata(ctrl);
+ sfc->dev = dev;
+ sfc->ctrl = ctrl;
+
+ sfc->caps = of_device_get_match_data(dev);
+ if (!sfc->caps)
+ return dev_err_probe(dev, -ENODEV, "failed to get device data\n");
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ sfc->regmap_base = devm_regmap_init_mmio(dev, reg_base, &core_config);
+ if (IS_ERR(sfc->regmap_base))
+ return dev_err_probe(dev, PTR_ERR(sfc->regmap_base),
+ "failed to init sfc base regmap\n");
+
+ sfc->data_buf = devm_kzalloc(dev, SFC_BUF_SIZE, GFP_KERNEL);
+ if (!sfc->data_buf)
+ return -ENOMEM;
+ sfc->info_buf = (__le64 *)(sfc->data_buf + SFC_DATABUF_SIZE);
+
+ ret = aml_sfc_clk_init(sfc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize SFC clock\n");
+
+ /* Enable Amlogic flash controller spi mode */
+ ret = regmap_write(sfc->regmap_base, SFC_SPI_CFG, SPI_MODE_EN);
+ if (ret) {
+ dev_err(dev, "failed to enable SPI mode\n");
+ goto err_out;
+ }
+
+ ret = dma_set_mask(sfc->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(sfc->dev, "failed to set dma mask\n");
+ goto err_out;
+ }
+
+ sfc->ecc_eng.dev = &pdev->dev;
+ sfc->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
+ sfc->ecc_eng.ops = &aml_sfc_ecc_engine_ops;
+ sfc->ecc_eng.priv = sfc;
+
+ ret = nand_ecc_register_on_host_hw_engine(&sfc->ecc_eng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register Aml host ecc engine.\n");
+ goto err_out;
+ }
+
+ ret = of_property_read_u32(np, "amlogic,rx-adj", &val);
+ if (!ret)
+ sfc->rx_adj = val;
+
+ ctrl->dev.of_node = np;
+ ctrl->mem_ops = &aml_sfc_mem_ops;
+ ctrl->mem_caps = &aml_sfc_mem_caps;
+ ctrl->setup = aml_sfc_setup;
+ ctrl->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD |
+ SPI_RX_DUAL | SPI_TX_OCTAL | SPI_RX_OCTAL;
+ ctrl->max_speed_hz = SFC_MAX_FREQUENCY;
+ ctrl->min_speed_hz = SFC_MIN_FREQUENCY;
+ ctrl->num_chipselect = SFC_MAX_CS_NUM;
+
+ ret = devm_spi_register_controller(dev, ctrl);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ aml_sfc_disable_clk(sfc);
+
+ return ret;
+}
+
+static void aml_sfc_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct aml_sfc *sfc = spi_controller_get_devdata(ctlr);
+
+ aml_sfc_disable_clk(sfc);
+}
+
+static const struct of_device_id aml_sfc_of_match[] = {
+ {
+ .compatible = "amlogic,a4-spifc",
+ .data = &aml_a113l2_sfc_caps
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, aml_sfc_of_match);
+
+static struct platform_driver aml_sfc_driver = {
+ .driver = {
+ .name = "aml_sfc",
+ .of_match_table = aml_sfc_of_match,
+ },
+ .probe = aml_sfc_probe,
+ .remove = aml_sfc_remove,
+};
+module_platform_driver(aml_sfc_driver);
+
+MODULE_DESCRIPTION("Amlogic SPI Flash Controller driver");
+MODULE_AUTHOR("Feng Chen <feng.chen@amlogic.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/spi/spi-amlogic-spisg.c b/drivers/spi/spi-amlogic-spisg.c
index 2ab8bdf2a676..bcd7ec291ad0 100644
--- a/drivers/spi/spi-amlogic-spisg.c
+++ b/drivers/spi/spi-amlogic-spisg.c
@@ -662,7 +662,7 @@ static int aml_spisg_clk_init(struct spisg_device *spisg, void __iomem *base)
clk_disable_unprepare(spisg->pclk);
- tbl = devm_kzalloc(dev, sizeof(struct clk_div_table) * (DIV_NUM + 1), GFP_KERNEL);
+ tbl = devm_kcalloc(dev, (DIV_NUM + 1), sizeof(*tbl), GFP_KERNEL);
if (!tbl)
return -ENOMEM;
@@ -733,7 +733,7 @@ static int aml_spisg_probe(struct platform_device *pdev)
else
ctlr = spi_alloc_host(dev, sizeof(*spisg));
if (!ctlr)
- return dev_err_probe(dev, -ENOMEM, "controller allocation failed\n");
+ return -ENOMEM;
spisg = spi_controller_get_devdata(ctlr);
spisg->controller = ctlr;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 89a6b46cd319..89977bff76d2 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -256,6 +256,7 @@ struct atmel_spi {
void __iomem *regs;
int irq;
struct clk *clk;
+ struct clk *gclk;
struct platform_device *pdev;
unsigned long spi_clk;
@@ -397,20 +398,10 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
* on CS1,2,3 needs SPI_CSR0.BITS config as SPI_CSR1,2,3.BITS
*/
spi_writel(as, CSR0, asd->csr);
- if (as->caps.has_wdrbt) {
- spi_writel(as, MR,
- SPI_BF(PCS, ~(0x01 << chip_select))
- | SPI_BIT(WDRBT)
- | SPI_BIT(MODFDIS)
- | SPI_BIT(MSTR));
- } else {
- spi_writel(as, MR,
- SPI_BF(PCS, ~(0x01 << chip_select))
- | SPI_BIT(MODFDIS)
- | SPI_BIT(MSTR));
- }
mr = spi_readl(as, MR);
+ mr = SPI_BFINS(PCS, ~(0x01 << chip_select), mr);
+ spi_writel(as, MR, mr);
/*
* Ensures the clock polarity is valid before we actually
@@ -1490,6 +1481,8 @@ static void atmel_get_caps(struct atmel_spi *as)
static void atmel_spi_init(struct atmel_spi *as)
{
+ u32 mr = 0;
+
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
@@ -1497,12 +1490,17 @@ static void atmel_spi_init(struct atmel_spi *as)
if (as->fifo_size)
spi_writel(as, CR, SPI_BIT(FIFOEN));
- if (as->caps.has_wdrbt) {
- spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
- | SPI_BIT(MSTR));
- } else {
- spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
- }
+ /*
+ * If GCLK is selected as the source clock for bit rate generation,
+ * enable the BRSRCCLK/FDIV/DIV32 bit.
+ */
+ if (as->gclk)
+ mr |= SPI_BIT(FDIV);
+
+ if (as->caps.has_wdrbt)
+ mr |= SPI_BIT(WDRBT);
+
+ spi_writel(as, MR, mr | SPI_BIT(MODFDIS) | SPI_BIT(MSTR));
if (as->use_pdc)
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
@@ -1565,6 +1563,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->phybase = regs->start;
as->irq = irq;
as->clk = clk;
+ as->gclk = devm_clk_get_optional(&pdev->dev, "spi_gclk");
+ if (IS_ERR(as->gclk)) {
+ ret = PTR_ERR(as->gclk);
+ goto out_unmap_regs;
+ }
init_completion(&as->xfer_completion);
@@ -1625,7 +1628,19 @@ static int atmel_spi_probe(struct platform_device *pdev)
if (ret)
goto out_free_irq;
- as->spi_clk = clk_get_rate(clk);
+ /*
+ * When the peripheral clock is so high that FLEX_SPI_CSRx.SCBR would
+ * exceed its limit (SCBR must be ≤ 255), the GCLK is used as the source
+ * clock for SPCK (SPI Serial Clock) bit rate generation.
+ */
+ if (as->gclk) {
+ ret = clk_prepare_enable(as->gclk);
+ if (ret)
+ goto out_disable_clk;
+ as->spi_clk = clk_get_rate(as->gclk);
+ } else {
+ as->spi_clk = clk_get_rate(clk);
+ }
as->fifo_size = 0;
if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
@@ -1660,6 +1675,8 @@ out_free_dma:
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
+ clk_disable_unprepare(as->gclk);
+out_disable_clk:
clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
@@ -1695,6 +1712,8 @@ static void atmel_spi_remove(struct platform_device *pdev)
spin_unlock_irq(&as->lock);
clk_disable_unprepare(as->clk);
+ if (as->gclk)
+ clk_disable_unprepare(as->gclk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1706,6 +1725,8 @@ static int atmel_spi_runtime_suspend(struct device *dev)
struct atmel_spi *as = spi_controller_get_devdata(host);
clk_disable_unprepare(as->clk);
+ if (as->gclk)
+ clk_disable_unprepare(as->gclk);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -1715,10 +1736,20 @@ static int atmel_spi_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct atmel_spi *as = spi_controller_get_devdata(host);
+ int ret;
pinctrl_pm_select_default_state(dev);
- return clk_prepare_enable(as->clk);
+ ret = clk_prepare_enable(as->clk);
+ if (ret)
+ return ret;
+ if (as->gclk) {
+ ret = clk_prepare_enable(as->gclk);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int atmel_spi_suspend(struct device *dev)
@@ -1746,10 +1777,17 @@ static int atmel_spi_resume(struct device *dev)
ret = clk_prepare_enable(as->clk);
if (ret)
return ret;
+ if (as->gclk) {
+ ret = clk_prepare_enable(as->gclk);
+ if (ret)
+ return ret;
+ }
atmel_spi_init(as);
clk_disable_unprepare(as->clk);
+ if (as->gclk)
+ clk_disable_unprepare(as->gclk);
if (!pm_runtime_suspended(dev)) {
ret = atmel_spi_runtime_resume(dev);
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 512d53a8ef4d..e06f412190fd 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -1050,7 +1050,7 @@ static int spi_engine_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (ADI_AXI_PCORE_VER_MINOR(version) >= 1) {
+ if (adi_axi_pcore_ver_gteq(version, 1, 1)) {
unsigned int sizes = readl(spi_engine->base +
SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);
@@ -1064,7 +1064,7 @@ static int spi_engine_probe(struct platform_device *pdev)
}
/* IP v1.5 dropped the requirement for SYNC in offload messages. */
- spi_engine->offload_requires_sync = ADI_AXI_PCORE_VER_MINOR(version) < 5;
+ spi_engine->offload_requires_sync = !adi_axi_pcore_ver_gteq(version, 1, 5);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
@@ -1091,15 +1091,12 @@ static int spi_engine_probe(struct platform_device *pdev)
host->put_offload = spi_engine_put_offload;
host->num_chipselect = 8;
- /* Some features depend of the IP core version. */
- if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) {
- if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
- host->mode_bits |= SPI_CS_HIGH;
- host->setup = spi_engine_setup;
- }
- if (ADI_AXI_PCORE_VER_MINOR(version) >= 3)
- host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
+ if (adi_axi_pcore_ver_gteq(version, 1, 2)) {
+ host->mode_bits |= SPI_CS_HIGH;
+ host->setup = spi_engine_setup;
}
+ if (adi_axi_pcore_ver_gteq(version, 1, 3))
+ host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
if (host->max_speed_hz == 0)
return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 77de5a07639a..192cc5ef65fb 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -622,7 +622,7 @@ static void bcm2835_spi_dma_rx_done(void *data)
/* reset fifo and HW */
bcm2835_spi_reset_hw(bs);
- /* and mark as completed */;
+ /* and mark as completed */
spi_finalize_current_transfer(ctlr);
}
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index d288e9d9c187..8fb13df8ff87 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -33,7 +33,7 @@
#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 4
-static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);
+static_assert(CQSPI_MAX_CHIPSELECT <= SPI_DEVICE_CS_CNT_MAX);
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
@@ -336,7 +336,7 @@ static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
- return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
+ return reg & BIT(CQSPI_REG_CONFIG_IDLE_LSB);
}
static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
@@ -572,7 +572,7 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
<< CQSPI_REG_CMDCTRL_DUMMY_LSB;
- reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+ reg |= BIT(CQSPI_REG_CMDCTRL_RD_EN_LSB);
/* 0 means 1 byte. */
reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
@@ -580,7 +580,7 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
/* setup ADDR BIT field */
if (op->addr.nbytes) {
- reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= BIT(CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
reg |= ((op->addr.nbytes - 1) &
CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
@@ -647,7 +647,7 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
if (op->addr.nbytes) {
- reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= BIT(CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
reg |= ((op->addr.nbytes - 1) &
CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
@@ -656,7 +656,7 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
}
if (n_tx) {
- reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
+ reg |= BIT(CQSPI_REG_CMDCTRL_WR_EN_LSB);
reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
data = 0;
@@ -720,6 +720,7 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (op->addr.nbytes - 1);
writel(reg, reg_base + CQSPI_REG_SIZE);
+ readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
return 0;
}
@@ -765,6 +766,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTRD_START_MASK,
reg_base + CQSPI_REG_INDIRECTRD);
+ readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */
while (remaining > 0) {
if (use_irq &&
@@ -1063,6 +1065,7 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (op->addr.nbytes - 1);
writel(reg, reg_base + CQSPI_REG_SIZE);
+ readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
return 0;
}
@@ -1091,6 +1094,8 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTWR_START_MASK,
reg_base + CQSPI_REG_INDIRECTWR);
+ readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */
+
/*
* As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
* Controller programming sequence, couple of cycles of
@@ -1187,7 +1192,7 @@ static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
* CS2 to 4b'1011
* CS3 to 4b'0111
*/
- chip_select = 0xF & ~(1 << chip_select);
+ chip_select = 0xF & ~BIT(chip_select);
}
reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
@@ -1273,9 +1278,9 @@ static void cqspi_readdata_capture(struct cqspi_st *cqspi,
reg = readl(reg_base + CQSPI_REG_READCAPTURE);
if (bypass)
- reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+ reg |= BIT(CQSPI_REG_READCAPTURE_BYPASS_LSB);
else
- reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+ reg &= ~BIT(CQSPI_REG_READCAPTURE_BYPASS_LSB);
reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
<< CQSPI_REG_READCAPTURE_DELAY_LSB);
@@ -1722,12 +1727,10 @@ static const struct spi_controller_mem_caps cqspi_mem_caps = {
static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
- unsigned int max_cs = cqspi->num_chipselect - 1;
struct platform_device *pdev = cqspi->pdev;
struct device *dev = &pdev->dev;
struct cqspi_flash_pdata *f_pdata;
- unsigned int cs;
- int ret;
+ int ret, cs, max_cs = -1;
/* Get flash device data */
for_each_available_child_of_node_scoped(dev->of_node, np) {
@@ -1740,10 +1743,10 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
if (cs >= cqspi->num_chipselect) {
dev_err(dev, "Chip select %d out of range.\n", cs);
return -EINVAL;
- } else if (cs < max_cs) {
- max_cs = cs;
}
+ max_cs = max_t(int, cs, max_cs);
+
f_pdata = &cqspi->f_pdata[cs];
f_pdata->cqspi = cqspi;
f_pdata->cs = cs;
@@ -1753,6 +1756,11 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
return ret;
}
+ if (max_cs < 0) {
+ dev_err(dev, "No flash device declared\n");
+ return -ENODEV;
+ }
+
cqspi->num_chipselect = max_cs + 1;
return 0;
}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 4bd4377551b5..83ea296597e9 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -331,6 +331,8 @@ struct fsl_dspi_dma {
dma_addr_t rx_dma_phys;
struct completion cmd_rx_complete;
struct dma_async_tx_descriptor *rx_desc;
+
+ size_t bufsize;
};
struct fsl_dspi {
@@ -373,6 +375,8 @@ struct fsl_dspi {
void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};
+static void dspi_setup_accel(struct fsl_dspi *dspi);
+
static bool is_s32g_dspi(struct fsl_dspi *data)
{
return data->devtype_data == &devtype_data[S32G] ||
@@ -468,6 +472,27 @@ static u32 dspi_pop_tx(struct fsl_dspi *dspi)
return txdata;
}
+/* Push one word to the RX buffer from the POPR register (RX FIFO) */
+static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
+{
+ if (!dspi->rx)
+ return;
+ dspi->dev_to_host(dspi, rxdata);
+}
+
+static int dspi_fifo_error(struct fsl_dspi *dspi, u32 spi_sr)
+{
+ if (spi_sr & (SPI_SR_TFUF | SPI_SR_RFOF)) {
+ dev_err_ratelimited(&dspi->pdev->dev, "FIFO errors:%s%s\n",
+ spi_sr & SPI_SR_TFUF ? " TX underflow," : "",
+ spi_sr & SPI_SR_RFOF ? " RX overflow," : "");
+ return -EIO;
+ }
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DMA_ENGINE)
+
/* Prepare one TX FIFO entry (txdata plus cmd) */
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
@@ -481,19 +506,37 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
return cmd << 16 | data;
}
-/* Push one word to the RX buffer from the POPR register (RX FIFO) */
-static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
+static size_t dspi_dma_max_datawords(struct fsl_dspi *dspi)
{
- if (!dspi->rx)
- return;
- dspi->dev_to_host(dspi, rxdata);
+ /*
+ * Transfers look like one of these, so we always use a full DMA word
+ * regardless of SPI word size:
+ *
+ * 31 16 15 0
+ * -----------------------------------------
+ * | CONTROL WORD | 16-bit DATA |
+ * -----------------------------------------
+ * or
+ * -----------------------------------------
+ * | CONTROL WORD | UNUSED | 8-bit DATA |
+ * -----------------------------------------
+ */
+ return dspi->dma->bufsize / DMA_SLAVE_BUSWIDTH_4_BYTES;
+}
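Each DMA element therefore carries exactly one 32-bit PUSHR word regardless of the SPI word size, so a PAGE_SIZE buffer holds PAGE_SIZE / 4 = 1024 data words. A minimal sketch of the packing pictured above, as a hypothetical standalone helper mirroring dspi_pop_tx_pushr():

#include <linux/types.h>

/* Pack the 16-bit command field and one data word into a single PUSHR entry. */
static u32 dspi_pack_pushr(u16 cmd, u16 data)
{
	return (u32)cmd << 16 | data;
}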
+
+static size_t dspi_dma_transfer_size(struct fsl_dspi *dspi)
+{
+ return dspi->words_in_flight * DMA_SLAVE_BUSWIDTH_4_BYTES;
}
static void dspi_tx_dma_callback(void *arg)
{
struct fsl_dspi *dspi = arg;
struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
+ dma_sync_single_for_cpu(dev, dma->tx_dma_phys,
+ dspi_dma_transfer_size(dspi), DMA_TO_DEVICE);
complete(&dma->cmd_tx_complete);
}
@@ -501,9 +544,13 @@ static void dspi_rx_dma_callback(void *arg)
{
struct fsl_dspi *dspi = arg;
struct fsl_dspi_dma *dma = dspi->dma;
+ struct device *dev = &dspi->pdev->dev;
int i;
if (dspi->rx) {
+ dma_sync_single_for_cpu(dev, dma->rx_dma_phys,
+ dspi_dma_transfer_size(dspi),
+ DMA_FROM_DEVICE);
for (i = 0; i < dspi->words_in_flight; i++)
dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
}
@@ -513,20 +560,22 @@ static void dspi_rx_dma_callback(void *arg)
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
+ size_t size = dspi_dma_transfer_size(dspi);
struct device *dev = &dspi->pdev->dev;
struct fsl_dspi_dma *dma = dspi->dma;
int time_left;
+ u32 spi_sr;
int i;
for (i = 0; i < dspi->words_in_flight; i++)
dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
+ dma_sync_single_for_device(dev, dma->tx_dma_phys, size, DMA_TO_DEVICE);
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
- dma->tx_dma_phys,
- dspi->words_in_flight *
- DMA_SLAVE_BUSWIDTH_4_BYTES,
- DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ dma->tx_dma_phys, size,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
if (!dma->tx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
return -EIO;
@@ -539,12 +588,13 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
return -EINVAL;
}
+ dma_sync_single_for_device(dev, dma->rx_dma_phys, size,
+ DMA_FROM_DEVICE);
dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
- dma->rx_dma_phys,
- dspi->words_in_flight *
- DMA_SLAVE_BUSWIDTH_4_BYTES,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ dma->rx_dma_phys, size,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
if (!dma->rx_desc) {
dev_err(dev, "Not able to get desc for DMA xfer\n");
return -EIO;
@@ -565,7 +615,8 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
if (spi_controller_is_target(dspi->ctlr)) {
wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
- return 0;
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+ return dspi_fifo_error(dspi, spi_sr);
}
time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
@@ -589,13 +640,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
return 0;
}
-static void dspi_setup_accel(struct fsl_dspi *dspi);
-
-static int dspi_dma_xfer(struct fsl_dspi *dspi)
+static void dspi_dma_xfer(struct fsl_dspi *dspi)
{
struct spi_message *message = dspi->cur_msg;
struct device *dev = &dspi->pdev->dev;
- int ret = 0;
/*
* dspi->len gets decremented by dspi_pop_tx_pushr in
@@ -605,26 +653,22 @@ static int dspi_dma_xfer(struct fsl_dspi *dspi)
/* Figure out operational bits-per-word for this chunk */
dspi_setup_accel(dspi);
- dspi->words_in_flight = dspi->len / dspi->oper_word_size;
- if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
- dspi->words_in_flight = dspi->devtype_data->fifo_size;
+ dspi->words_in_flight = min(dspi->len / dspi->oper_word_size,
+ dspi_dma_max_datawords(dspi));
message->actual_length += dspi->words_in_flight *
dspi->oper_word_size;
- ret = dspi_next_xfer_dma_submit(dspi);
- if (ret) {
+ message->status = dspi_next_xfer_dma_submit(dspi);
+ if (message->status) {
dev_err(dev, "DMA transfer failed\n");
break;
}
}
-
- return ret;
}
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
- int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct device *dev = &dspi->pdev->dev;
struct dma_slave_config cfg;
struct fsl_dspi_dma *dma;
@@ -644,17 +688,30 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
goto err_tx_channel;
}
- dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
- dma_bufsize, &dma->tx_dma_phys,
- GFP_KERNEL);
+ if (spi_controller_is_target(dspi->ctlr)) {
+ /*
+ * In target mode we have to be ready to receive the maximum
+ * that can possibly be transferred at once by EDMA without any
+ * FIFO underflows.
+ */
+ dma->bufsize = min(dma_get_max_seg_size(dma->chan_rx->device->dev),
+ dma_get_max_seg_size(dma->chan_tx->device->dev)) *
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ } else {
+ dma->bufsize = PAGE_SIZE;
+ }
+
+ dma->tx_dma_buf = dma_alloc_noncoherent(dma->chan_tx->device->dev,
+ dma->bufsize, &dma->tx_dma_phys,
+ DMA_TO_DEVICE, GFP_KERNEL);
if (!dma->tx_dma_buf) {
ret = -ENOMEM;
goto err_tx_dma_buf;
}
- dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
- dma_bufsize, &dma->rx_dma_phys,
- GFP_KERNEL);
+ dma->rx_dma_buf = dma_alloc_noncoherent(dma->chan_rx->device->dev,
+ dma->bufsize, &dma->rx_dma_phys,
+ DMA_FROM_DEVICE, GFP_KERNEL);
if (!dma->rx_dma_buf) {
ret = -ENOMEM;
goto err_rx_dma_buf;
@@ -689,11 +746,12 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
return 0;
err_slave_config:
- dma_free_coherent(dma->chan_rx->device->dev,
- dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
+ dma_free_noncoherent(dma->chan_rx->device->dev, dma->bufsize,
+ dma->rx_dma_buf, dma->rx_dma_phys,
+ DMA_FROM_DEVICE);
err_rx_dma_buf:
- dma_free_coherent(dma->chan_tx->device->dev,
- dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
+ dma_free_noncoherent(dma->chan_tx->device->dev, dma->bufsize,
+ dma->tx_dma_buf, dma->tx_dma_phys, DMA_TO_DEVICE);
err_tx_dma_buf:
dma_release_channel(dma->chan_tx);
err_tx_channel:
@@ -707,24 +765,37 @@ err_tx_channel:
static void dspi_release_dma(struct fsl_dspi *dspi)
{
- int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct fsl_dspi_dma *dma = dspi->dma;
if (!dma)
return;
if (dma->chan_tx) {
- dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
- dma->tx_dma_buf, dma->tx_dma_phys);
+ dma_free_noncoherent(dma->chan_tx->device->dev, dma->bufsize,
+ dma->tx_dma_buf, dma->tx_dma_phys,
+ DMA_TO_DEVICE);
dma_release_channel(dma->chan_tx);
}
if (dma->chan_rx) {
- dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
- dma->rx_dma_buf, dma->rx_dma_phys);
+ dma_free_noncoherent(dma->chan_rx->device->dev, dma->bufsize,
+ dma->rx_dma_buf, dma->rx_dma_phys,
+ DMA_FROM_DEVICE);
dma_release_channel(dma->chan_rx);
}
}
+#else
+static void dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+ dspi->cur_msg->status = -EINVAL;
+}
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+ dev_err(&dspi->pdev->dev, "DMA support not enabled in kernel\n");
+ return -EINVAL;
+}
+static void dspi_release_dma(struct fsl_dspi *dspi) {}
+#endif
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
unsigned long clkrate, bool mtf_enabled)
@@ -986,41 +1057,55 @@ static void dspi_fifo_write(struct fsl_dspi *dspi)
dspi->progress, !dspi->irq);
}
-static int dspi_rxtx(struct fsl_dspi *dspi)
+/*
+ * Read the previous transfer from the FIFO and transmit the next one.
+ *
+ * Returns false if the buffer to be transmitted is empty, and true if there is
+ * still data to transmit.
+ */
+static bool dspi_rxtx(struct fsl_dspi *dspi)
{
dspi_fifo_read(dspi);
if (!dspi->len)
/* Success! */
- return 0;
+ return false;
dspi_fifo_write(dspi);
- return -EINPROGRESS;
+ return true;
}
-static int dspi_poll(struct fsl_dspi *dspi)
+static void dspi_poll(struct fsl_dspi *dspi)
{
- int tries = 1000;
+ int tries;
+ int err = 0;
u32 spi_sr;
do {
- regmap_read(dspi->regmap, SPI_SR, &spi_sr);
- regmap_write(dspi->regmap, SPI_SR, spi_sr);
-
- if (spi_sr & SPI_SR_CMDTCF)
+ for (tries = 1000; tries > 0; --tries) {
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+ regmap_write(dspi->regmap, SPI_SR, spi_sr);
+
+ dspi->cur_msg->status = dspi_fifo_error(dspi, spi_sr);
+ if (dspi->cur_msg->status)
+ return;
+ if (spi_sr & SPI_SR_CMDTCF)
+ break;
+ }
+ if (!tries) {
+ err = -ETIMEDOUT;
break;
- } while (--tries);
-
- if (!tries)
- return -ETIMEDOUT;
+ }
+ } while (dspi_rxtx(dspi));
- return dspi_rxtx(dspi);
+ dspi->cur_msg->status = err;
}
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
+ int status;
u32 spi_sr;
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
@@ -1029,8 +1114,19 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
if (!(spi_sr & SPI_SR_CMDTCF))
return IRQ_NONE;
- if (dspi_rxtx(dspi) == 0)
+ status = dspi_fifo_error(dspi, spi_sr);
+ if (status) {
+ if (dspi->cur_msg)
+ WRITE_ONCE(dspi->cur_msg->status, status);
+ complete(&dspi->xfer_done);
+ return IRQ_HANDLED;
+ }
+
+ if (dspi_rxtx(dspi) == false) {
+ if (dspi->cur_msg)
+ WRITE_ONCE(dspi->cur_msg->status, 0);
complete(&dspi->xfer_done);
+ }
return IRQ_HANDLED;
}
@@ -1060,7 +1156,6 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_device *spi = message->spi;
struct spi_transfer *transfer;
bool cs = false;
- int status = 0;
u32 val = 0;
bool cs_change = false;
@@ -1120,7 +1215,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->progress, !dspi->irq);
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
- status = dspi_dma_xfer(dspi);
+ dspi_dma_xfer(dspi);
} else {
/*
* Reinitialize the completion before transferring data
@@ -1134,15 +1229,12 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi_fifo_write(dspi);
- if (dspi->irq) {
+ if (dspi->irq)
wait_for_completion(&dspi->xfer_done);
- } else {
- do {
- status = dspi_poll(dspi);
- } while (status == -EINPROGRESS);
- }
+ else
+ dspi_poll(dspi);
}
- if (status)
+ if (READ_ONCE(message->status))
break;
spi_transfer_delay_exec(transfer);
@@ -1151,7 +1243,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi_deassert_cs(spi, &cs);
}
- if (status || !cs_change) {
+ dspi->cur_msg = NULL;
+ if (message->status || !cs_change) {
/* Put DSPI in stop mode */
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_HALT, SPI_MCR_HALT);
@@ -1160,10 +1253,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
;
}
- message->status = status;
spi_finalize_current_message(ctlr);
- return status;
+ return message->status;
}
static int dspi_set_mtf(struct fsl_dspi *dspi)
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 431439d4cdda..8da66e101386 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -26,6 +26,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
+#include <linux/minmax.h>
#define DRIVER_NAME "fsl_lpspi"
@@ -485,10 +486,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
}
- if (t->len <= fsl_lpspi->txfifosize)
- fsl_lpspi->watermark = t->len;
- else
- fsl_lpspi->watermark = fsl_lpspi->txfifosize;
+ fsl_lpspi->watermark = min_t(typeof(fsl_lpspi->watermark),
+ fsl_lpspi->txfifosize,
+ t->len);
if (fsl_lpspi_can_dma(controller, spi, t))
fsl_lpspi->usedma = true;
diff --git a/drivers/spi/spi-ljca.c b/drivers/spi/spi-ljca.c
index 2cab79ad2b98..3f412cf8f1cd 100644
--- a/drivers/spi/spi-ljca.c
+++ b/drivers/spi/spi-ljca.c
@@ -289,7 +289,7 @@ static struct auxiliary_driver ljca_spi_driver = {
};
module_auxiliary_driver(ljca_spi_driver);
-MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Wentong Wu");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>");
MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-SPI driver");
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 7dd92deffe3f..e0b131aa29b6 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -446,7 +446,7 @@ static void spi_test_dump_message(struct spi_device *spi,
int i;
u8 b;
- dev_info(&spi->dev, " spi_msg@%pK\n", msg);
+ dev_info(&spi->dev, " spi_msg@%p\n", msg);
if (msg->status)
dev_info(&spi->dev, " status: %i\n",
msg->status);
@@ -456,15 +456,15 @@ static void spi_test_dump_message(struct spi_device *spi,
msg->actual_length);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- dev_info(&spi->dev, " spi_transfer@%pK\n", xfer);
+ dev_info(&spi->dev, " spi_transfer@%p\n", xfer);
dev_info(&spi->dev, " len: %i\n", xfer->len);
- dev_info(&spi->dev, " tx_buf: %pK\n", xfer->tx_buf);
+ dev_info(&spi->dev, " tx_buf: %p\n", xfer->tx_buf);
if (dump_data && xfer->tx_buf)
spi_test_print_hex_dump(" TX: ",
xfer->tx_buf,
xfer->len);
- dev_info(&spi->dev, " rx_buf: %pK\n", xfer->rx_buf);
+ dev_info(&spi->dev, " rx_buf: %p\n", xfer->rx_buf);
if (dump_data && xfer->rx_buf)
spi_test_print_hex_dump(" RX: ",
xfer->rx_buf,
@@ -558,7 +558,7 @@ static int spi_check_rx_ranges(struct spi_device *spi,
/* if still not found then something has modified too much */
/* we could list the "closest" transfer here... */
dev_err(&spi->dev,
- "loopback strangeness - rx changed outside of allowed range at: %pK\n",
+ "loopback strangeness - rx changed outside of allowed range at: %p\n",
addr);
/* do not return, only set ret,
* so that we list all addresses
@@ -696,7 +696,7 @@ static int spi_test_translate(struct spi_device *spi,
}
dev_err(&spi->dev,
- "PointerRange [%pK:%pK[ not in range [%pK:%pK[ or [%pK:%pK[\n",
+ "PointerRange [%p:%p[ not in range [%p:%p[ or [%p:%p[\n",
*ptr, *ptr + len,
RX(0), RX(SPI_TEST_MAX_SIZE),
TX(0), TX(SPI_TEST_MAX_SIZE));
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
index 8dc98b17f77b..aafe6cbf2aea 100644
--- a/drivers/spi/spi-microchip-core-qspi.c
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -689,8 +689,7 @@ static int mchp_coreqspi_probe(struct platform_device *pdev)
ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*qspi));
if (!ctlr)
- return dev_err_probe(&pdev->dev, -ENOMEM,
- "unable to allocate host for QSPI controller\n");
+ return -ENOMEM;
qspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, qspi);
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index 62ba0bd9cbb7..9128b86c5366 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -534,8 +534,7 @@ static int mchp_corespi_probe(struct platform_device *pdev)
host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi));
if (!host)
- return dev_err_probe(&pdev->dev, -ENOMEM,
- "unable to allocate host for SPI controller\n");
+ return -ENOMEM;
platform_set_drvdata(pdev, host);
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index a6032d44771b..4b40985af1ea 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -563,6 +563,22 @@ static void mtk_spi_setup_packet(struct spi_controller *host)
writel(reg_val, mdata->base + SPI_CFG1_REG);
}
+static inline u32 mtk_spi_set_nbit(u32 nbit)
+{
+ switch (nbit) {
+ default:
+ pr_warn_once("unknown nbit mode %u. Falling back to single mode\n",
+ nbit);
+ fallthrough;
+ case SPI_NBITS_SINGLE:
+ return 0x0;
+ case SPI_NBITS_DUAL:
+ return 0x1;
+ case SPI_NBITS_QUAD:
+ return 0x2;
+ }
+}
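The value returned here is OR'd directly into SPI_CFG3_IPM_REG by the transfer path below, so the mapping is simply SPI_NBITS_SINGLE/DUAL/QUAD to 0x0/0x1/0x2, with anything else falling back to single mode. A small illustrative sketch of the caller side, assuming a transfer prepared by the SPI core:

/* A quad-wide TX chunk has xfer->tx_nbits == SPI_NBITS_QUAD, giving 0x2. */
static u32 example_ipm_width_bits(const struct spi_transfer *xfer)
{
	return mtk_spi_set_nbit(xfer->tx_nbits);
}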
+
static void mtk_spi_enable_transfer(struct spi_controller *host)
{
u32 cmd;
@@ -729,10 +745,16 @@ static int mtk_spi_transfer_one(struct spi_controller *host,
/* prepare xfer direction and duplex mode */
if (mdata->dev_comp->ipm_design) {
- if (!xfer->tx_buf || !xfer->rx_buf) {
+ if (xfer->tx_buf && xfer->rx_buf) {
+ reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ } else if (xfer->tx_buf) {
reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
- if (xfer->rx_buf)
- reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ reg_val |= mtk_spi_set_nbit(xfer->tx_nbits);
+ } else {
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ reg_val |= mtk_spi_set_nbit(xfer->rx_nbits);
}
writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
}
@@ -1159,7 +1181,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
host = devm_spi_alloc_host(dev, sizeof(*mdata));
if (!host)
- return dev_err_probe(dev, -ENOMEM, "failed to alloc spi host\n");
+ return -ENOMEM;
host->auto_runtime_pm = true;
host->dev.of_node = dev->of_node;
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index e82ee6dcf498..ae38c244e258 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -1139,7 +1139,6 @@ static int mtk_snand_write_page_cache(struct mtk_snand *snf,
// Prepare for custom write interrupt
nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
reinit_completion(&snf->op_done);
- ;
// Trigger NFI into custom mode
nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 43455305fdf4..0ebcbdb1b1f7 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -388,7 +388,7 @@ static int mxs_spi_transfer_one(struct spi_controller *host,
TXRX_DEASSERT_CS : 0;
/*
- * Small blocks can be transfered via PIO.
+ * Small blocks can be transferred via PIO.
* Measured by empiric means:
*
* dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 67cc1d86de42..cccd17f24775 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -13,6 +13,7 @@
#include <linux/vmalloc.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/minmax.h>
#include <linux/spi/spi-mem.h>
#include <linux/mfd/syscon.h>
@@ -498,10 +499,7 @@ static int npcm_fiu_read(struct spi_mem *mem, const struct spi_mem_op *op)
do {
addr = ((u32)op->addr.val + i);
- if (currlen < 16)
- readlen = currlen;
- else
- readlen = 16;
+ readlen = min_t(int, currlen, 16);
buf_ptr = data + i;
ret = npcm_fiu_uma_read(mem, op, addr, true, buf_ptr,
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index b92bfef47371..f9371f98a65b 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -330,6 +330,8 @@
/* Access flash memory using IP bus only */
#define FSPI_QUIRK_USE_IP_ONLY BIT(0)
+/* Disable DTR */
+#define FSPI_QUIRK_DISABLE_DTR BIT(1)
struct nxp_fspi_devtype_data {
unsigned int rxfifo;
@@ -344,7 +346,7 @@ static struct nxp_fspi_devtype_data lx2160a_data = {
.rxfifo = SZ_512, /* (64 * 64 bits) */
.txfifo = SZ_1K, /* (128 * 64 bits) */
.ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
- .quirks = 0,
+ .quirks = FSPI_QUIRK_DISABLE_DTR,
.lut_num = 32,
.little_endian = true, /* little-endian */
};
@@ -399,7 +401,8 @@ struct nxp_fspi {
struct mutex lock;
struct pm_qos_request pm_qos_req;
int selected;
-#define FSPI_NEED_INIT (1 << 0)
+#define FSPI_NEED_INIT BIT(0)
+#define FSPI_DTR_MODE BIT(1)
int flags;
};
@@ -559,12 +562,21 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
u32 target_lut_reg;
/* cmd */
- lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
- op->cmd.opcode);
+ if (op->cmd.dtr) {
+ lutval[0] |= LUT_DEF(0, LUT_CMD_DDR, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode >> 8);
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_CMD_DDR,
+ LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode & 0xFF);
+ lutidx++;
+ } else {
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode);
+ }
/* addr bytes */
if (op->addr.nbytes) {
- lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, op->addr.dtr ? LUT_ADDR_DDR : LUT_ADDR,
LUT_PAD(op->addr.buswidth),
op->addr.nbytes * 8);
lutidx++;
@@ -572,7 +584,7 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
/* dummy bytes, if needed */
if (op->dummy.nbytes) {
- lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, op->dummy.dtr ? LUT_DUMMY_DDR : LUT_DUMMY,
/*
* Due to FlexSPI controller limitation number of PAD for dummy
* buswidth needs to be programmed as equal to data buswidth.
@@ -587,7 +599,8 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
if (op->data.nbytes) {
lutval[lutidx / 2] |= LUT_DEF(lutidx,
op->data.dir == SPI_MEM_DATA_IN ?
- LUT_NXP_READ : LUT_NXP_WRITE,
+ (op->data.dtr ? LUT_READ_DDR : LUT_NXP_READ) :
+ (op->data.dtr ? LUT_WRITE_DDR : LUT_NXP_WRITE),
LUT_PAD(op->data.buswidth),
0);
lutidx++;
@@ -645,6 +658,40 @@ static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
return;
}
+/*
+ * Sample Clock source selection for Flash Reading
+ * Four modes are defined by the FlexSPI:
+ * mode 0: Dummy Read strobe generated by FlexSPI Controller
+ * and loopback internally
+ * mode 1: Dummy Read strobe generated by FlexSPI Controller
+ * and loopback from DQS pad
+ * mode 2: Reserved
+ * mode 3: Flash provided Read strobe and input from DQS pad
+ *
+ * The FlexSPI defaults to mode 0 after reset.
+ */
+static void nxp_fspi_select_rx_sample_clk_source(struct nxp_fspi *f,
+ bool op_is_dtr)
+{
+ u32 reg;
+
+ /*
+	 * For 8D-8D-8D mode, mode 3 (flash-provided read strobe, input from
+	 * the DQS pad) must be used, otherwise reads may fail.
+	 * This mode requires the flash device's DQS pad to be connected on
+	 * the board.
+	 * For the other modes, keep using mode 0 as before.
+	 * spi_nor_suspend() disables 8D-8D-8D mode, so the mode also needs
+	 * to be switched back to mode 0 then.
+ */
+ reg = fspi_readl(f, f->iobase + FSPI_MCR0);
+ if (op_is_dtr)
+ reg |= FSPI_MCR0_RXCLKSRC(3);
+	else /* select mode 0 */
+ reg &= ~FSPI_MCR0_RXCLKSRC(3);
+ fspi_writel(f, reg, f->iobase + FSPI_MCR0);
+}
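Mode 3 is only chosen when every phase of the operation is DTR. A sketch of what such an 8D-8D-8D read looks like at the spi-mem level, written with plain designated initializers; the opcode and dummy-cycle count are purely illustrative and device specific:

#include <linux/spi/spi-mem.h>

/* Every phase is 8 bits wide and DTR, so op_is_dtr below evaluates to true. */
static const struct spi_mem_op example_8d_8d_8d_read = {
	.cmd   = { .opcode = 0xee11, .nbytes = 2, .buswidth = 8, .dtr = true },
	.addr  = { .nbytes = 4, .buswidth = 8, .dtr = true },
	.dummy = { .nbytes = 16, .buswidth = 8, .dtr = true },
	.data  = { .dir = SPI_MEM_DATA_IN, .buswidth = 8, .dtr = true },
};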
+
static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
{
int ret;
@@ -675,6 +722,17 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
}
/*
+ * Config the DLL register to default value, enable the target clock delay
+ * line delay cell override mode, and use 1 fixed delay cell in DLL delay
+ * chain, this is the suggested setting when clock rate < 100MHz.
+ */
+static void nxp_fspi_dll_override(struct nxp_fspi *f)
+{
+ fspi_writel(f, FSPI_DLLACR_OVRDEN, f->iobase + FSPI_DLLACR);
+ fspi_writel(f, FSPI_DLLBCR_OVRDEN, f->iobase + FSPI_DLLBCR);
+}
+
+/*
* In FlexSPI controller, flash access is based on value of FSPI_FLSHXXCR0
* register and start base address of the target device.
*
@@ -715,15 +773,18 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
const struct spi_mem_op *op)
{
+	/* The FlexSPI only supports one DTR mode: 8D-8D-8D */
+ bool op_is_dtr = op->cmd.dtr && op->addr.dtr && op->dummy.dtr && op->data.dtr;
unsigned long rate = op->max_freq;
int ret;
uint64_t size_kb;
/*
* Return, if previously selected target device is same as current
- * requested target device.
+	 * requested target device and the DTR/STR mode has not changed.
*/
- if (f->selected == spi_get_chipselect(spi, 0))
+ if ((f->selected == spi_get_chipselect(spi, 0)) &&
+ (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr))
return;
/* Reset FLSHxxCR0 registers */
@@ -740,6 +801,18 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
dev_dbg(f->dev, "Target device [CS:%x] selected\n", spi_get_chipselect(spi, 0));
+ nxp_fspi_select_rx_sample_clk_source(f, op_is_dtr);
+
+ if (op_is_dtr) {
+ f->flags |= FSPI_DTR_MODE;
+		/* In DTR mode the FlexSPI divides the root clock by 2 before driving
+		 * the device, so configure the root clock to twice the device rate.
+ */
+ rate = rate * 2;
+ } else {
+ f->flags &= ~FSPI_DTR_MODE;
+ }
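Combined with the clk_set_rate() call that follows, the arithmetic is just a doubling: the controller halves its root clock internally in DTR mode, so a device running SCK at 100 MHz DDR needs a 200 MHz root clock. A tiny hypothetical helper making that explicit:

/* Root clock needed for a given device SCK rate; FlexSPI halves it in DTR mode. */
static unsigned long example_fspi_root_rate(unsigned long dev_hz, bool dtr)
{
	return dtr ? dev_hz * 2 : dev_hz;
}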
+
nxp_fspi_clk_disable_unprep(f);
ret = clk_set_rate(f->clk, rate);
@@ -756,6 +829,8 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
*/
if (rate > 100000000)
nxp_fspi_dll_calibration(f);
+ else
+ nxp_fspi_dll_override(f);
f->selected = spi_get_chipselect(spi, 0);
}
@@ -1071,13 +1146,7 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
/* Disable the module */
fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0);
- /*
- * Config the DLL register to default value, enable the target clock delay
- * line delay cell override mode, and use 1 fixed delay cell in DLL delay
- * chain, this is the suggested setting when clock rate < 100MHz.
- */
- fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR);
- fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR);
+ nxp_fspi_dll_override(f);
/* enable module */
fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) |
@@ -1164,6 +1233,13 @@ static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
};
static const struct spi_controller_mem_caps nxp_fspi_mem_caps = {
+ .dtr = true,
+ .swap16 = false,
+ .per_op_freq = true,
+};
+
+static const struct spi_controller_mem_caps nxp_fspi_mem_caps_disable_dtr = {
+ .dtr = false,
.per_op_freq = true,
};
@@ -1279,12 +1355,17 @@ static int nxp_fspi_probe(struct platform_device *pdev)
ctlr->bus_num = -1;
ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
ctlr->mem_ops = &nxp_fspi_mem_ops;
- ctlr->mem_caps = &nxp_fspi_mem_caps;
+
+ if (f->devtype_data->quirks & FSPI_QUIRK_DISABLE_DTR)
+ ctlr->mem_caps = &nxp_fspi_mem_caps_disable_dtr;
+ else
+ ctlr->mem_caps = &nxp_fspi_mem_caps;
+
ctlr->dev.of_node = np;
ret = devm_add_action_or_reset(dev, nxp_fspi_cleanup, f);
if (ret)
- return dev_err_probe(dev, ret, "Failed to register nxp_fspi_cleanup\n");
+ return ret;
return devm_spi_register_controller(&pdev->dev, ctlr);
}
diff --git a/drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c b/drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c
index 035d088d4d33..8468c773713a 100644
--- a/drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c
+++ b/drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c
@@ -5,12 +5,15 @@
*/
#include <linux/clk.h>
-#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/types.h>
static bool adi_util_sigma_delta_match(struct spi_offload_trigger *trigger,
enum spi_offload_trigger_type type,
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index dd87cf4f70dd..9e56e8774614 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -33,6 +33,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/minmax.h>
/*
* This macro is used to define some register default values.
@@ -760,10 +761,9 @@ static void setup_dma_scatter(struct pl022 *pl022,
* we just feed in this, else we stuff in as much
* as we can.
*/
- if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
- mapbytes = bytesleft;
- else
- mapbytes = PAGE_SIZE - offset_in_page(bufp);
+ mapbytes = min_t(int, bytesleft,
+ PAGE_SIZE - offset_in_page(bufp));
+
sg_set_page(sg, virt_to_page(bufp),
mapbytes, offset_in_page(bufp));
bufp += mapbytes;
@@ -775,10 +775,7 @@ static void setup_dma_scatter(struct pl022 *pl022,
} else {
/* Map the dummy buffer on every page */
for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
- if (bytesleft < PAGE_SIZE)
- mapbytes = bytesleft;
- else
- mapbytes = PAGE_SIZE;
+ mapbytes = min_t(int, bytesleft, PAGE_SIZE);
sg_set_page(sg, virt_to_page(pl022->dummypage),
mapbytes, 0);
bytesleft -= mapbytes;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 06711a62fa3d..ec7117a94d5f 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1283,7 +1283,7 @@ int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp,
else
controller = devm_spi_alloc_host(dev, sizeof(*drv_data));
if (!controller)
- return dev_err_probe(dev, -ENOMEM, "cannot alloc spi_controller\n");
+ return -ENOMEM;
drv_data = spi_controller_get_devdata(controller);
drv_data->controller = controller;
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
index 780abb967822..58ceea1ea8fb 100644
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -78,7 +78,6 @@ struct qcom_ecc_stats {
};
struct qpic_ecc {
- struct device *dev;
int ecc_bytes_hw;
int spare_bytes;
int bbm_size;
@@ -95,8 +94,6 @@ struct qpic_ecc {
u32 cfg1_raw;
u32 ecc_buf_cfg;
u32 ecc_bch_cfg;
- u32 clrflashstatus;
- u32 clrreadstatus;
bool bch_enabled;
};
@@ -382,12 +379,12 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
ecc_cfg->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203);
- ecc_cfg->clrflashstatus = FS_READY_BSY_N;
- ecc_cfg->clrreadstatus = 0xc0;
conf->step_size = ecc_cfg->step_size;
conf->strength = ecc_cfg->strength;
+ snandc->regs->clrflashstatus = cpu_to_le32(FS_READY_BSY_N);
+ snandc->regs->clrreadstatus = cpu_to_le32(0xc0);
snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);
@@ -494,9 +491,14 @@ qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int c
qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
- qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ if (use_ecc) {
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+ } else {
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+ }
}
static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
@@ -599,8 +601,6 @@ static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
snandc->regs->cfg0 = cpu_to_le32(cfg0);
snandc->regs->cfg1 = cpu_to_le32(cfg1);
snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
- snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
- snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
snandc->regs->exec = cpu_to_le32(1);
qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);
@@ -734,8 +734,6 @@ static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_bu
snandc->regs->cfg0 = cpu_to_le32(cfg0);
snandc->regs->cfg1 = cpu_to_le32(cfg1);
snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
- snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
- snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
snandc->regs->exec = cpu_to_le32(1);
qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);
@@ -850,8 +848,6 @@ static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
snandc->regs->cfg0 = cpu_to_le32(cfg0);
snandc->regs->cfg1 = cpu_to_le32(cfg1);
snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
- snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
- snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
snandc->regs->exec = cpu_to_le32(1);
qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
@@ -943,8 +939,6 @@ static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
snandc->regs->cfg0 = cpu_to_le32(cfg0);
snandc->regs->cfg1 = cpu_to_le32(cfg1);
snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
- snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
- snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
snandc->regs->exec = cpu_to_le32(1);
qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
@@ -1064,8 +1058,6 @@ static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
snandc->regs->cfg0 = cpu_to_le32(cfg0);
snandc->regs->cfg1 = cpu_to_le32(cfg1);
snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
- snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
- snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
snandc->regs->exec = cpu_to_le32(1);
qcom_spi_config_page_write(snandc);
@@ -1549,17 +1541,16 @@ static int qcom_spi_probe(struct platform_device *pdev)
}
snandc->props = dev_data;
- snandc->dev = &pdev->dev;
- snandc->core_clk = devm_clk_get(dev, "core");
+ snandc->core_clk = devm_clk_get_enabled(dev, "core");
if (IS_ERR(snandc->core_clk))
return PTR_ERR(snandc->core_clk);
- snandc->aon_clk = devm_clk_get(dev, "aon");
+ snandc->aon_clk = devm_clk_get_enabled(dev, "aon");
if (IS_ERR(snandc->aon_clk))
return PTR_ERR(snandc->aon_clk);
- snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
+ snandc->qspi->iomacro_clk = devm_clk_get_enabled(dev, "iom");
if (IS_ERR(snandc->qspi->iomacro_clk))
return PTR_ERR(snandc->qspi->iomacro_clk);
@@ -1573,18 +1564,6 @@ static int qcom_spi_probe(struct platform_device *pdev)
if (dma_mapping_error(dev, snandc->base_dma))
return -ENXIO;
- ret = clk_prepare_enable(snandc->core_clk);
- if (ret)
- goto err_dis_core_clk;
-
- ret = clk_prepare_enable(snandc->aon_clk);
- if (ret)
- goto err_dis_aon_clk;
-
- ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
- if (ret)
- goto err_dis_iom_clk;
-
ret = qcom_nandc_alloc(snandc);
if (ret)
goto err_snand_alloc;
@@ -1625,12 +1604,6 @@ err_register_controller:
err_spi_init:
qcom_nandc_unalloc(snandc);
err_snand_alloc:
- clk_disable_unprepare(snandc->qspi->iomacro_clk);
-err_dis_iom_clk:
- clk_disable_unprepare(snandc->aon_clk);
-err_dis_aon_clk:
- clk_disable_unprepare(snandc->core_clk);
-err_dis_core_clk:
dma_unmap_resource(dev, res->start, resource_size(res),
DMA_BIDIRECTIONAL, 0);
return ret;
@@ -1645,11 +1618,6 @@ static void qcom_spi_remove(struct platform_device *pdev)
spi_unregister_controller(ctlr);
nand_ecc_unregister_on_host_hw_engine(&snandc->qspi->ecc_eng);
qcom_nandc_unalloc(snandc);
-
- clk_disable_unprepare(snandc->aon_clk);
- clk_disable_unprepare(snandc->core_clk);
- clk_disable_unprepare(snandc->qspi->iomacro_clk);
-
dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
DMA_BIDIRECTIONAL, 0);
}
diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
index e71d3805b150..22b86fc89132 100644
--- a/drivers/spi/spi-rb4xx.c
+++ b/drivers/spi/spi-rb4xx.c
@@ -16,7 +16,16 @@
#include <linux/spi/spi.h>
#include <linux/of.h>
-#include <asm/mach-ath79/ar71xx_regs.h>
+#define AR71XX_SPI_REG_FS 0x00 /* Function Select */
+#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */
+#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */
+#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */
+
+#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */
+
+#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */
+#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */
+#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n))
struct rb4xx_spi {
void __iomem *base;
@@ -63,7 +72,7 @@ static inline void do_spi_clk_two(struct rb4xx_spi *rbspi, u32 spi_ioc,
if (value & BIT(1))
regval |= AR71XX_SPI_IOC_DO;
if (value & BIT(0))
- regval |= AR71XX_SPI_IOC_CS2;
+ regval |= AR71XX_SPI_IOC_CS(2);
rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval);
rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval | AR71XX_SPI_IOC_CLK);
@@ -89,7 +98,7 @@ static void rb4xx_set_cs(struct spi_device *spi, bool enable)
*/
if (enable)
rb4xx_write(rbspi, AR71XX_SPI_REG_IOC,
- AR71XX_SPI_IOC_CS0 | AR71XX_SPI_IOC_CS1);
+ AR71XX_SPI_IOC_CS(0) | AR71XX_SPI_IOC_CS(1));
}
static int rb4xx_transfer_one(struct spi_controller *host,
@@ -109,10 +118,10 @@ static int rb4xx_transfer_one(struct spi_controller *host,
*/
if (spi_get_chipselect(spi, 0) == 2)
/* MMC */
- spi_ioc = AR71XX_SPI_IOC_CS0;
+ spi_ioc = AR71XX_SPI_IOC_CS(0);
else
/* Boot flash and CPLD */
- spi_ioc = AR71XX_SPI_IOC_CS1;
+ spi_ioc = AR71XX_SPI_IOC_CS(1);
tx_buf = t->tx_buf;
rx_buf = t->rx_buf;
@@ -147,7 +156,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
if (!host)
return -ENOMEM;
- ahb_clk = devm_clk_get(&pdev->dev, "ahb");
+ ahb_clk = devm_clk_get_enabled(&pdev->dev, "ahb");
if (IS_ERR(ahb_clk))
return PTR_ERR(ahb_clk);
@@ -163,7 +172,6 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
rbspi = spi_controller_get_devdata(host);
rbspi->base = spi_base;
rbspi->clk = ahb_clk;
- platform_set_drvdata(pdev, rbspi);
err = devm_spi_register_controller(&pdev->dev, host);
if (err) {
@@ -171,23 +179,12 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
return err;
}
- err = clk_prepare_enable(ahb_clk);
- if (err)
- return err;
-
/* Enable SPI */
rb4xx_write(rbspi, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
return 0;
}
-static void rb4xx_spi_remove(struct platform_device *pdev)
-{
- struct rb4xx_spi *rbspi = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(rbspi->clk);
-}
-
static const struct of_device_id rb4xx_spi_dt_match[] = {
{ .compatible = "mikrotik,rb4xx-spi" },
{ },
@@ -196,10 +193,9 @@ MODULE_DEVICE_TABLE(of, rb4xx_spi_dt_match);
static struct platform_driver rb4xx_spi_drv = {
.probe = rb4xx_spi_probe,
- .remove = rb4xx_spi_remove,
.driver = {
.name = "rb4xx-spi",
- .of_match_table = of_match_ptr(rb4xx_spi_dt_match),
+ .of_match_table = rb4xx_spi_dt_match,
},
};
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index 627cffea5d5c..6edc0c4db854 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -196,21 +196,23 @@ static void rpcif_spi_remove(struct platform_device *pdev)
pm_runtime_disable(rpc->dev);
}
-static int __maybe_unused rpcif_spi_suspend(struct device *dev)
+static int rpcif_spi_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
return spi_controller_suspend(ctlr);
}
-static int __maybe_unused rpcif_spi_resume(struct device *dev)
+static int rpcif_spi_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
+ rpcif_hw_init(dev, false);
+
return spi_controller_resume(ctlr);
}
-static SIMPLE_DEV_PM_OPS(rpcif_spi_pm_ops, rpcif_spi_suspend, rpcif_spi_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rpcif_spi_pm_ops, rpcif_spi_suspend, rpcif_spi_resume);
static const struct platform_device_id rpc_if_spi_id_table[] = {
{ .name = "rpc-if-spi" },
@@ -224,9 +226,7 @@ static struct platform_driver rpcif_spi_driver = {
.id_table = rpc_if_spi_id_table,
.driver = {
.name = "rpc-if-spi",
-#ifdef CONFIG_PM_SLEEP
- .pm = &rpcif_spi_pm_ops,
-#endif
+ .pm = pm_sleep_ptr(&rpcif_spi_pm_ops),
},
};
module_platform_driver(rpcif_spi_driver);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index b1567243ae19..aab36c779c06 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1268,8 +1268,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
host = devm_spi_alloc_host(&pdev->dev, sizeof(*sdd));
if (!host)
- return dev_err_probe(&pdev->dev, -ENOMEM,
- "Unable to allocate SPI Host\n");
+ return -ENOMEM;
platform_set_drvdata(pdev, host);
@@ -1507,16 +1506,6 @@ static const struct dev_pm_ops s3c64xx_spi_pm = {
s3c64xx_spi_runtime_resume, NULL)
};
-static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
- /* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
- .fifo_lvl_mask = { 0x7f },
- /* rx_lvl_offset is deprecated. Use {rx, tx}_fifomask instead. */
- .rx_lvl_offset = 13,
- .tx_st_done = 21,
- .clk_div = 2,
- .high_speed = true,
-};
-
static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
/* fifo_lvl_mask is deprecated. Use {rx, tx}_fifomask instead. */
.fifo_lvl_mask = { 0x7f, 0x7F },
@@ -1628,9 +1617,6 @@ static const struct s3c64xx_spi_port_config gs101_spi_port_config = {
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
- .name = "s3c2443-spi",
- .driver_data = (kernel_ulong_t)&s3c2443_spi_port_config,
- }, {
.name = "s3c6410-spi",
.driver_data = (kernel_ulong_t)&s3c6410_spi_port_config,
},
@@ -1642,9 +1628,6 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "google,gs101-spi",
.data = &gs101_spi_port_config,
},
- { .compatible = "samsung,s3c2443-spi",
- .data = &s3c2443_spi_port_config,
- },
{ .compatible = "samsung,s3c6410-spi",
.data = &s3c6410_spi_port_config,
},
diff --git a/drivers/spi/spi-sunplus-sp7021.c b/drivers/spi/spi-sunplus-sp7021.c
index 7fd4cc6f74c2..256ae07db6be 100644
--- a/drivers/spi/spi-sunplus-sp7021.c
+++ b/drivers/spi/spi-sunplus-sp7021.c
@@ -103,7 +103,7 @@ static irqreturn_t sp7021_spi_target_irq(int irq, void *dev)
data_status = readl(pspim->s_base + SP7021_DATA_RDY_REG);
data_status |= SP7021_SLAVE_CLR_INT;
- writel(data_status , pspim->s_base + SP7021_DATA_RDY_REG);
+ writel(data_status, pspim->s_base + SP7021_DATA_RDY_REG);
complete(&pspim->target_isr);
return IRQ_HANDLED;
}
@@ -296,7 +296,7 @@ static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfe
}
static int sp7021_spi_host_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
- struct spi_transfer *xfer)
+ struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
unsigned long timeout = msecs_to_jiffies(1000);
@@ -360,7 +360,7 @@ static int sp7021_spi_host_transfer_one(struct spi_controller *ctlr, struct spi_
}
static int sp7021_spi_target_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
- struct spi_transfer *xfer)
+ struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
struct device *dev = pspim->dev;
diff --git a/drivers/spi/spi-virtio.c b/drivers/spi/spi-virtio.c
new file mode 100644
index 000000000000..2acb929b2c69
--- /dev/null
+++ b/drivers/spi/spi-virtio.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI bus driver for the Virtio SPI controller
+ * Copyright (C) 2023 OpenSynergy GmbH
+ * Copyright (C) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/stddef.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_spi.h>
+
+#define VIRTIO_SPI_MODE_MASK \
+ (SPI_MODE_X_MASK | SPI_CS_HIGH | SPI_LSB_FIRST)
+
+struct virtio_spi_req {
+ struct completion completion;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ struct spi_transfer_head transfer_head ____cacheline_aligned;
+ struct spi_transfer_result result;
+};
+
+struct virtio_spi_priv {
+ /* The virtio device we're associated with */
+ struct virtio_device *vdev;
+ /* Pointer to the virtqueue */
+ struct virtqueue *vq;
+ /* Copy of config space mode_func_supported */
+ u32 mode_func_supported;
+ /* Copy of config space max_freq_hz */
+ u32 max_freq_hz;
+};
+
+static void virtio_spi_msg_done(struct virtqueue *vq)
+{
+ struct virtio_spi_req *req;
+ unsigned int len;
+
+ while ((req = virtqueue_get_buf(vq, &len)))
+ complete(&req->completion);
+}
+
+/*
+ * virtio_spi_set_delays - Set delay parameters for SPI transfer
+ *
+ * This function sets the various delay parameters for an SPI transfer:
+ * the delay after CS is asserted, the timing interval between adjacent
+ * words within a transfer, and the delays before and after CS is
+ * deasserted. It converts these delays to nanoseconds using
+ * spi_delay_to_ns() and stores the results in the spi_transfer_head
+ * structure.
+ * If a conversion fails, the function logs a warning and returns an
+ * error code.
+ * . . . . . . . . . .
+ * Delay + A + + B + + C + D + E + F + A +
+ * . . . . . . . . . .
+ * ___. . . . . . .___.___. .
+ * CS# |___.______.____.____.___.___| . |___._____________
+ * . . . . . . . . . .
+ * . . . . . . . . . .
+ * SCLK__.___.___NNN_____NNN__.___.___.___.___.___.___NNN_______
+ *
+ * NOTE: 1st transfer has two words, the delay between these two words are
+ * 'B' in the diagram.
+ *
+ * A => struct spi_device -> cs_setup
+ * B => max{struct spi_transfer -> word_delay, struct spi_device -> word_delay}
+ *      Note: spi_device and spi_transfer both have word_delay; Linux
+ *      chooses the bigger one, see the _spi_xfer_word_delay_update() function
+ * C => struct spi_transfer -> delay
+ * D => struct spi_device -> cs_hold
+ * E => struct spi_device -> cs_inactive
+ * F => struct spi_transfer -> cs_change_delay
+ *
+ * So the corresponding relationship:
+ * A <===> cs_setup_ns (after CS asserted)
+ * B <===> word_delay_ns (delay between adjacent words within a transfer)
+ * C+D <===> cs_delay_hold_ns (before CS deasserted)
+ * E+F <===> cs_change_delay_inactive_ns (after CS deasserted, these two
+ * values are also recommended in the Linux driver to be added up)
+ */
+static int virtio_spi_set_delays(struct spi_transfer_head *th,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ int cs_setup;
+ int cs_word_delay_xfer;
+ int cs_word_delay_spi;
+ int delay;
+ int cs_hold;
+ int cs_inactive;
+ int cs_change_delay;
+
+ cs_setup = spi_delay_to_ns(&spi->cs_setup, xfer);
+ if (cs_setup < 0) {
+ dev_warn(&spi->dev, "Cannot convert cs_setup\n");
+ return cs_setup;
+ }
+ th->cs_setup_ns = cpu_to_le32(cs_setup);
+
+ cs_word_delay_xfer = spi_delay_to_ns(&xfer->word_delay, xfer);
+ if (cs_word_delay_xfer < 0) {
+ dev_warn(&spi->dev, "Cannot convert cs_word_delay_xfer\n");
+ return cs_word_delay_xfer;
+ }
+ cs_word_delay_spi = spi_delay_to_ns(&spi->word_delay, xfer);
+ if (cs_word_delay_spi < 0) {
+ dev_warn(&spi->dev, "Cannot convert cs_word_delay_spi\n");
+ return cs_word_delay_spi;
+ }
+
+ th->word_delay_ns = cpu_to_le32(max(cs_word_delay_spi, cs_word_delay_xfer));
+
+ delay = spi_delay_to_ns(&xfer->delay, xfer);
+ if (delay < 0) {
+ dev_warn(&spi->dev, "Cannot convert delay\n");
+ return delay;
+ }
+ cs_hold = spi_delay_to_ns(&spi->cs_hold, xfer);
+ if (cs_hold < 0) {
+ dev_warn(&spi->dev, "Cannot convert cs_hold\n");
+ return cs_hold;
+ }
+ th->cs_delay_hold_ns = cpu_to_le32(delay + cs_hold);
+
+ cs_inactive = spi_delay_to_ns(&spi->cs_inactive, xfer);
+ if (cs_inactive < 0) {
+ dev_warn(&spi->dev, "Cannot convert cs_inactive\n");
+ return cs_inactive;
+ }
+ cs_change_delay = spi_delay_to_ns(&xfer->cs_change_delay, xfer);
+ if (cs_change_delay < 0) {
+ dev_warn(&spi->dev, "Cannot convert cs_change_delay\n");
+ return cs_change_delay;
+ }
+ th->cs_change_delay_inactive_ns =
+ cpu_to_le32(cs_inactive + cs_change_delay);
+
+ return 0;
+}
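The C+D and E+F sums from the diagram are plain additions of two spi_delay conversions. A minimal sketch of the same path with fixed microsecond delays, values chosen only for illustration:

#include <linux/spi/spi.h>

/* A 3 us transfer delay plus a 5 us cs_hold gives 8000 ns on the wire. */
static u32 example_cs_delay_hold_ns(struct spi_transfer *xfer)
{
	struct spi_delay post    = { .value = 3, .unit = SPI_DELAY_UNIT_USECS };
	struct spi_delay cs_hold = { .value = 5, .unit = SPI_DELAY_UNIT_USECS };

	return spi_delay_to_ns(&post, xfer) + spi_delay_to_ns(&cs_hold, xfer);
}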
+
+static int virtio_spi_transfer_one(struct spi_controller *ctrl,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct virtio_spi_priv *priv = spi_controller_get_devdata(ctrl);
+ struct virtio_spi_req *spi_req __free(kfree) = NULL;
+ struct spi_transfer_head *th;
+ struct scatterlist sg_out_head, sg_out_payload;
+ struct scatterlist sg_in_result, sg_in_payload;
+ struct scatterlist *sgs[4];
+ unsigned int outcnt = 0;
+ unsigned int incnt = 0;
+ int ret;
+
+ spi_req = kzalloc(sizeof(*spi_req), GFP_KERNEL);
+ if (!spi_req)
+ return -ENOMEM;
+
+ init_completion(&spi_req->completion);
+
+ th = &spi_req->transfer_head;
+
+ /* Fill struct spi_transfer_head */
+ th->chip_select_id = spi_get_chipselect(spi, 0);
+ th->bits_per_word = spi->bits_per_word;
+ th->cs_change = xfer->cs_change;
+ th->tx_nbits = xfer->tx_nbits;
+ th->rx_nbits = xfer->rx_nbits;
+ th->reserved[0] = 0;
+ th->reserved[1] = 0;
+ th->reserved[2] = 0;
+
+ static_assert(VIRTIO_SPI_CPHA == SPI_CPHA,
+ "VIRTIO_SPI_CPHA must match SPI_CPHA");
+ static_assert(VIRTIO_SPI_CPOL == SPI_CPOL,
+ "VIRTIO_SPI_CPOL must match SPI_CPOL");
+ static_assert(VIRTIO_SPI_CS_HIGH == SPI_CS_HIGH,
+ "VIRTIO_SPI_CS_HIGH must match SPI_CS_HIGH");
+ static_assert(VIRTIO_SPI_MODE_LSB_FIRST == SPI_LSB_FIRST,
+ "VIRTIO_SPI_MODE_LSB_FIRST must match SPI_LSB_FIRST");
+
+ th->mode = cpu_to_le32(spi->mode & VIRTIO_SPI_MODE_MASK);
+ if (spi->mode & SPI_LOOP)
+ th->mode |= cpu_to_le32(VIRTIO_SPI_MODE_LOOP);
+
+ th->freq = cpu_to_le32(xfer->speed_hz);
+
+ ret = virtio_spi_set_delays(th, spi, xfer);
+ if (ret)
+ goto msg_done;
+
+ /* Set buffers */
+ spi_req->tx_buf = xfer->tx_buf;
+ spi_req->rx_buf = xfer->rx_buf;
+
+ /* Prepare sending of virtio message */
+ init_completion(&spi_req->completion);
+
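+ /*
+  * Virtqueue layout for this request: the transfer head and the optional
+  * TX payload are device-readable buffers; the optional RX payload and
+  * the result descriptor are device-writable buffers.
+  */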
+ sg_init_one(&sg_out_head, th, sizeof(*th));
+ sgs[outcnt] = &sg_out_head;
+ outcnt++;
+
+ if (spi_req->tx_buf) {
+ sg_init_one(&sg_out_payload, spi_req->tx_buf, xfer->len);
+ sgs[outcnt] = &sg_out_payload;
+ outcnt++;
+ }
+
+ if (spi_req->rx_buf) {
+ sg_init_one(&sg_in_payload, spi_req->rx_buf, xfer->len);
+ sgs[outcnt] = &sg_in_payload;
+ incnt++;
+ }
+
+ sg_init_one(&sg_in_result, &spi_req->result,
+ sizeof(struct spi_transfer_result));
+ sgs[outcnt + incnt] = &sg_in_result;
+ incnt++;
+
+ ret = virtqueue_add_sgs(priv->vq, sgs, outcnt, incnt, spi_req,
+ GFP_KERNEL);
+ if (ret)
+ goto msg_done;
+
+ /* Simple implementation: There can be only one transfer in flight */
+ virtqueue_kick(priv->vq);
+
+ wait_for_completion(&spi_req->completion);
+
+ /* Read result from message and translate return code */
+ switch (spi_req->result.result) {
+ case VIRTIO_SPI_TRANS_OK:
+ break;
+ case VIRTIO_SPI_PARAM_ERR:
+ ret = -EINVAL;
+ break;
+ case VIRTIO_SPI_TRANS_ERR:
+ ret = -EIO;
+ break;
+ default:
+ ret = -EIO;
+ break;
+ }
+
+msg_done:
+ if (ret)
+ ctrl->cur_msg->status = ret;
+
+ return ret;
+}
+
+static void virtio_spi_read_config(struct virtio_device *vdev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(&vdev->dev);
+ struct virtio_spi_priv *priv = vdev->priv;
+ u8 cs_max_number;
+ u8 tx_nbits_supported;
+ u8 rx_nbits_supported;
+
+ cs_max_number = virtio_cread8(vdev, offsetof(struct virtio_spi_config,
+ cs_max_number));
+ ctrl->num_chipselect = cs_max_number;
+
+ /* Set the mode bits which are understood by this driver */
+ priv->mode_func_supported =
+ virtio_cread32(vdev, offsetof(struct virtio_spi_config,
+ mode_func_supported));
+ ctrl->mode_bits = priv->mode_func_supported &
+ (VIRTIO_SPI_CS_HIGH | VIRTIO_SPI_MODE_LSB_FIRST);
+ if (priv->mode_func_supported & VIRTIO_SPI_MF_SUPPORT_CPHA_1)
+ ctrl->mode_bits |= VIRTIO_SPI_CPHA;
+ if (priv->mode_func_supported & VIRTIO_SPI_MF_SUPPORT_CPOL_1)
+ ctrl->mode_bits |= VIRTIO_SPI_CPOL;
+ if (priv->mode_func_supported & VIRTIO_SPI_MF_SUPPORT_LSB_FIRST)
+ ctrl->mode_bits |= SPI_LSB_FIRST;
+ if (priv->mode_func_supported & VIRTIO_SPI_MF_SUPPORT_LOOPBACK)
+ ctrl->mode_bits |= SPI_LOOP;
+ tx_nbits_supported =
+ virtio_cread8(vdev, offsetof(struct virtio_spi_config,
+ tx_nbits_supported));
+ if (tx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_DUAL)
+ ctrl->mode_bits |= SPI_TX_DUAL;
+ if (tx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_QUAD)
+ ctrl->mode_bits |= SPI_TX_QUAD;
+ if (tx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_OCTAL)
+ ctrl->mode_bits |= SPI_TX_OCTAL;
+ rx_nbits_supported =
+ virtio_cread8(vdev, offsetof(struct virtio_spi_config,
+ rx_nbits_supported));
+ if (rx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_DUAL)
+ ctrl->mode_bits |= SPI_RX_DUAL;
+ if (rx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_QUAD)
+ ctrl->mode_bits |= SPI_RX_QUAD;
+ if (rx_nbits_supported & VIRTIO_SPI_RX_TX_SUPPORT_OCTAL)
+ ctrl->mode_bits |= SPI_RX_OCTAL;
+
+ ctrl->bits_per_word_mask =
+ virtio_cread32(vdev, offsetof(struct virtio_spi_config,
+ bits_per_word_mask));
+
+ priv->max_freq_hz =
+ virtio_cread32(vdev, offsetof(struct virtio_spi_config,
+ max_freq_hz));
+}
+
+static int virtio_spi_find_vqs(struct virtio_spi_priv *priv)
+{
+ struct virtqueue *vq;
+
+ vq = virtio_find_single_vq(priv->vdev, virtio_spi_msg_done, "spi-rq");
+ if (IS_ERR(vq))
+ return PTR_ERR(vq);
+ priv->vq = vq;
+ return 0;
+}
+
+/* Function must not be called before virtio_spi_find_vqs() has been run */
+static void virtio_spi_del_vq(void *data)
+{
+ struct virtio_device *vdev = data;
+
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+}
+
+static int virtio_spi_probe(struct virtio_device *vdev)
+{
+ struct virtio_spi_priv *priv;
+ struct spi_controller *ctrl;
+ int ret;
+
+ ctrl = devm_spi_alloc_host(&vdev->dev, sizeof(*priv));
+ if (!ctrl)
+ return -ENOMEM;
+
+ priv = spi_controller_get_devdata(ctrl);
+ priv->vdev = vdev;
+ vdev->priv = priv;
+
+ device_set_node(&ctrl->dev, dev_fwnode(&vdev->dev));
+
+ dev_set_drvdata(&vdev->dev, ctrl);
+
+ virtio_spi_read_config(vdev);
+
+ ctrl->transfer_one = virtio_spi_transfer_one;
+
+ ret = virtio_spi_find_vqs(priv);
+ if (ret)
+ return dev_err_probe(&vdev->dev, ret, "Cannot setup virtqueues\n");
+
+ /* Register cleanup for virtqueues using devm */
+ ret = devm_add_action_or_reset(&vdev->dev, virtio_spi_del_vq, vdev);
+ if (ret)
+ return dev_err_probe(&vdev->dev, ret, "Cannot register virtqueue cleanup\n");
+
+ /* Use devm version to register controller */
+ ret = devm_spi_register_controller(&vdev->dev, ctrl);
+ if (ret)
+ return dev_err_probe(&vdev->dev, ret, "Cannot register controller\n");
+
+ return 0;
+}
+
+static int virtio_spi_freeze(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct virtio_device *vdev = dev_to_virtio(dev);
+ int ret;
+
+ ret = spi_controller_suspend(ctrl);
+ if (ret) {
+ dev_warn(dev, "cannot suspend controller (%d)\n", ret);
+ return ret;
+ }
+
+ virtio_spi_del_vq(vdev);
+ return 0;
+}
+
+static int virtio_spi_restore(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct virtio_device *vdev = dev_to_virtio(dev);
+ int ret;
+
+ ret = virtio_spi_find_vqs(vdev->priv);
+ if (ret) {
+ dev_err(dev, "problem starting vqueue (%d)\n", ret);
+ return ret;
+ }
+
+ ret = spi_controller_resume(ctrl);
+ if (ret)
+ dev_err(dev, "problem resuming controller (%d)\n", ret);
+
+ return ret;
+}
+
+static struct virtio_device_id virtio_spi_id_table[] = {
+ { VIRTIO_ID_SPI, VIRTIO_DEV_ANY_ID },
+ {}
+};
+MODULE_DEVICE_TABLE(virtio, virtio_spi_id_table);
+
+static const struct dev_pm_ops virtio_spi_pm_ops = {
+ .freeze = pm_sleep_ptr(virtio_spi_freeze),
+ .restore = pm_sleep_ptr(virtio_spi_restore),
+};
+
+static struct virtio_driver virtio_spi_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = &virtio_spi_pm_ops,
+ },
+ .id_table = virtio_spi_id_table,
+ .probe = virtio_spi_probe,
+};
+module_virtio_driver(virtio_spi_driver);
+
+MODULE_AUTHOR("OpenSynergy GmbH");
+MODULE_AUTHOR("Haixu Cui <quic_haixcui@quicinc.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Virtio SPI bus driver");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a388f372b27a..2e0647a06890 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -427,15 +427,13 @@ static int spi_probe(struct device *dev)
if (spi->irq < 0)
spi->irq = 0;
- ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret)
return ret;
- if (sdrv->probe) {
+ if (sdrv->probe)
ret = sdrv->probe(spi);
- if (ret)
- dev_pm_domain_detach(dev, true);
- }
return ret;
}
@@ -446,8 +444,6 @@ static void spi_remove(struct device *dev)
if (sdrv->remove)
sdrv->remove(to_spi_device(dev));
-
- dev_pm_domain_detach(dev, true);
}
static void spi_shutdown(struct device *dev)
@@ -590,6 +586,7 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->mode = ctlr->buswidth_override_bits;
+ spi->num_chipselect = 1;
device_initialize(&spi->dev);
return spi;
@@ -626,11 +623,6 @@ static void spi_dev_set_name(struct spi_device *spi)
*/
#define SPI_INVALID_CS ((s8)-1)
-static inline bool is_valid_cs(s8 chip_select)
-{
- return chip_select != SPI_INVALID_CS;
-}
-
static inline int spi_dev_check_cs(struct device *dev,
struct spi_device *spi, u8 idx,
struct spi_device *new_spi, u8 new_idx)
@@ -639,9 +631,9 @@ static inline int spi_dev_check_cs(struct device *dev,
u8 idx_new;
cs = spi_get_chipselect(spi, idx);
- for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
+ for (idx_new = new_idx; idx_new < new_spi->num_chipselect; idx_new++) {
cs_new = spi_get_chipselect(new_spi, idx_new);
- if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
+ if (cs == cs_new) {
dev_err(dev, "chipselect %u already in use\n", cs_new);
return -EBUSY;
}
@@ -656,7 +648,7 @@ static int spi_dev_check(struct device *dev, void *data)
int status, idx;
if (spi->controller == new_spi->controller) {
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
if (status)
return status;
@@ -678,10 +670,16 @@ static int __spi_add_device(struct spi_device *spi)
int status, idx;
u8 cs;
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if (spi->num_chipselect > SPI_DEVICE_CS_CNT_MAX) {
+ dev_err(dev, "num_cs %d > max %d\n", spi->num_chipselect,
+ SPI_DEVICE_CS_CNT_MAX);
+ return -EOVERFLOW;
+ }
+
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
/* Chipselects are numbered 0..max; validate. */
cs = spi_get_chipselect(spi, idx);
- if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
+ if (cs >= ctlr->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
ctlr->num_chipselect);
return -EINVAL;
@@ -693,13 +691,17 @@ static int __spi_add_device(struct spi_device *spi)
* For example, spi->chip_select[0] != spi->chip_select[1] and so on.
*/
if (!spi_controller_is_target(ctlr)) {
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
if (status)
return status;
}
}
+ /* Initialize unused logical CS as invalid */
+ for (idx = spi->num_chipselect; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
+ spi_set_chipselect(spi, idx, SPI_INVALID_CS);
+
/* Set the bus ID string */
spi_dev_set_name(spi);
@@ -721,10 +723,9 @@ static int __spi_add_device(struct spi_device *spi)
if (ctlr->cs_gpiods) {
u8 cs;
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
cs = spi_get_chipselect(spi, idx);
- if (is_valid_cs(cs))
- spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
+ spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
}
}
@@ -777,14 +778,6 @@ int spi_add_device(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_add_device);
-static void spi_set_all_cs_unused(struct spi_device *spi)
-{
- u8 idx;
-
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi_set_chipselect(spi, idx, SPI_INVALID_CS);
-}
-
/**
* spi_new_device - instantiate one new SPI device
* @ctlr: Controller to which device is connected
@@ -820,7 +813,6 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
/* Use provided chip-select for proxy device */
- spi_set_all_cs_unused(proxy);
spi_set_chipselect(proxy, 0, chip->chip_select);
proxy->max_speed_hz = chip->max_speed_hz;
@@ -1028,7 +1020,7 @@ static void spi_res_release(struct spi_controller *ctlr, struct spi_message *mes
/*-------------------------------------------------------------------------*/
#define spi_for_each_valid_cs(spi, idx) \
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) \
+ for (idx = 0; idx < spi->num_chipselect; idx++) \
if (!(spi->cs_index_mask & BIT(idx))) {} else
static inline bool spi_is_last_cs(struct spi_device *spi)
@@ -1084,8 +1076,12 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
trace_spi_set_cs(spi, activate);
spi->controller->last_cs_index_mask = spi->cs_index_mask;
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
+ for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++) {
+ if (enable && idx < spi->num_chipselect)
+ spi->controller->last_cs[idx] = spi_get_chipselect(spi, 0);
+ else
+ spi->controller->last_cs[idx] = SPI_INVALID_CS;
+ }
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
if (spi->controller->last_cs_mode_high)
@@ -2358,7 +2354,7 @@ static void of_spi_parse_dt_cs_delay(struct device_node *nc,
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
- u32 value, cs[SPI_CS_CNT_MAX];
+ u32 value, cs[SPI_DEVICE_CS_CNT_MAX];
int rc, idx;
/* Mode (clock phase/polarity/etc.) */
@@ -2431,31 +2427,22 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
return 0;
}
- if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
- dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
- return -EINVAL;
- }
-
- spi_set_all_cs_unused(spi);
-
/* Device address */
rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
- SPI_CS_CNT_MAX);
+ SPI_DEVICE_CS_CNT_MAX);
if (rc < 0) {
dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
nc, rc);
return rc;
}
- if (rc > ctlr->num_chipselect) {
- dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
- nc, rc);
- return rc;
- }
+
if ((of_property_present(nc, "parallel-memories")) &&
(!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
return -EINVAL;
}
+
+ spi->num_chipselect = rc;
for (idx = 0; idx < rc; idx++)
spi_set_chipselect(spi, idx, cs[idx]);
@@ -2580,7 +2567,6 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
/* Use provided chip-select for ancillary device */
- spi_set_all_cs_unused(ancillary);
spi_set_chipselect(ancillary, 0, chip_select);
/* Take over SPI mode/speed from SPI main device */
@@ -2828,7 +2814,6 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
return ERR_PTR(-ENOMEM);
}
- spi_set_all_cs_unused(spi);
spi_set_chipselect(spi, 0, lookup.chip_select);
ACPI_COMPANION_SET(&spi->dev, adev);
@@ -3328,7 +3313,7 @@ int spi_register_controller(struct spi_controller *ctlr)
}
/* Setting last_cs to SPI_INVALID_CS means no chip selected */
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
ctlr->last_cs[idx] = SPI_INVALID_CS;
status = device_add(&ctlr->dev);
diff --git a/include/linux/adi-axi-common.h b/include/linux/adi-axi-common.h
index f64f4ad4beda..37962ba530df 100644
--- a/include/linux/adi-axi-common.h
+++ b/include/linux/adi-axi-common.h
@@ -8,6 +8,8 @@
* https://wiki.analog.com/resources/fpga/docs/hdl/regmap
*/
+#include <linux/types.h>
+
#ifndef ADI_AXI_COMMON_H_
#define ADI_AXI_COMMON_H_
@@ -21,6 +23,25 @@
#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+/**
+ * adi_axi_pcore_ver_gteq() - check if a version is satisfied
+ * @version: the full version read from the hardware
+ * @major: the major version to compare against
+ * @minor: the minor version to compare against
+ *
+ * ADI AXI IP Cores use semantic versioning, so this can be used to check for
+ * feature availability.
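+ * For example, adi_axi_pcore_ver_gteq(version, 10, 2) is true for a core
+ * reporting version 10.3.x and false for one reporting 9.255.x.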
+ *
+ * Return: true if the version is greater than or equal to the specified
+ * major and minor version, false otherwise.
+ */
+static inline bool adi_axi_pcore_ver_gteq(u32 version, u32 major, u32 minor)
+{
+ return ADI_AXI_PCORE_VER_MAJOR(version) > (major) ||
+ (ADI_AXI_PCORE_VER_MAJOR(version) == (major) &&
+ ADI_AXI_PCORE_VER_MINOR(version) >= (minor));
+}
+
#define ADI_AXI_INFO_FPGA_TECH(info) (((info) >> 24) & 0xff)
#define ADI_AXI_INFO_FPGA_FAMILY(info) (((info) >> 16) & 0xff)
#define ADI_AXI_INFO_FPGA_SPEED_GRADE(info) (((info) >> 8) & 0xff)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e9ea43234d9a..cb2c2df31089 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -21,7 +21,7 @@
#include <uapi/linux/spi/spi.h>
/* Max no. of CS supported per spi device */
-#define SPI_CS_CNT_MAX 24
+#define SPI_DEVICE_CS_CNT_MAX 4
struct dma_chan;
struct software_node;
@@ -170,6 +170,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* two delays will be added up.
* @chip_select: Array of physical chipselect, spi->chipselect[i] gives
* the corresponding physical CS for logical CS i.
+ * @num_chipselect: Number of physical chipselects used.
* @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
* @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
* (optional, NULL when not using a GPIO line)
@@ -228,7 +229,8 @@ struct spi_device {
struct spi_delay cs_hold;
struct spi_delay cs_inactive;
- u8 chip_select[SPI_CS_CNT_MAX];
+ u8 chip_select[SPI_DEVICE_CS_CNT_MAX];
+ u8 num_chipselect;
/*
* Bit mask of the chipselect(s) that the driver need to use from
@@ -236,9 +238,9 @@ struct spi_device {
* multiple chip selects & memories are connected in parallel
* then more than one bit need to be set in cs_index_mask.
*/
- u32 cs_index_mask : SPI_CS_CNT_MAX;
+ u32 cs_index_mask : SPI_DEVICE_CS_CNT_MAX;
- struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
+ struct gpio_desc *cs_gpiod[SPI_DEVICE_CS_CNT_MAX]; /* Chip select gpio desc */
/*
* Likely need more hooks for more protocol options affecting how
@@ -315,7 +317,7 @@ static inline bool spi_is_csgpiod(struct spi_device *spi)
{
u8 idx;
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
if (spi_get_csgpiod(spi, idx))
return true;
}
@@ -719,8 +721,8 @@ struct spi_controller {
bool auto_runtime_pm;
bool fallback;
bool last_cs_mode_high;
- s8 last_cs[SPI_CS_CNT_MAX];
- u32 last_cs_index_mask : SPI_CS_CNT_MAX;
+ s8 last_cs[SPI_DEVICE_CS_CNT_MAX];
+ u32 last_cs_index_mask : SPI_DEVICE_CS_CNT_MAX;
struct completion xfer_completion;
size_t max_dma_len;
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 7aa2eb766205..6c12db16faa3 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -68,6 +68,7 @@
#define VIRTIO_ID_AUDIO_POLICY 39 /* virtio audio policy */
#define VIRTIO_ID_BT 40 /* virtio bluetooth */
#define VIRTIO_ID_GPIO 41 /* virtio gpio */
+#define VIRTIO_ID_SPI 45 /* virtio spi */
/*
* Virtio Transitional IDs
diff --git a/include/uapi/linux/virtio_spi.h b/include/uapi/linux/virtio_spi.h
new file mode 100644
index 000000000000..8ab3c970cdd3
--- /dev/null
+++ b/include/uapi/linux/virtio_spi.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2023 OpenSynergy GmbH
+ * Copyright (C) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _LINUX_VIRTIO_VIRTIO_SPI_H
+#define _LINUX_VIRTIO_VIRTIO_SPI_H
+
+#include <linux/types.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_types.h>
+
+/* Sample data on trailing clock edge */
+#define VIRTIO_SPI_CPHA _BITUL(0)
+/* Clock is high when IDLE */
+#define VIRTIO_SPI_CPOL _BITUL(1)
+/* Chip Select is active high */
+#define VIRTIO_SPI_CS_HIGH _BITUL(2)
+/* Transmit LSB first */
+#define VIRTIO_SPI_MODE_LSB_FIRST _BITUL(3)
+/* Loopback mode */
+#define VIRTIO_SPI_MODE_LOOP _BITUL(4)
+
+/**
+ * struct virtio_spi_config - All config fields are read-only for the
+ * Virtio SPI driver
+ * @cs_max_number: maximum number of chipselect the host SPI controller
+ * supports.
+ * @cs_change_supported: indicates whether the host SPI controller supports
+ * toggling the chipselect after each transfer in one message:
+ * 0: unsupported, chipselect will be kept in active state throughout the
+ * message transaction;
+ * 1: supported.
+ * Note: a message here is a sequence of SPI transfers.
+ * @tx_nbits_supported: indicates the supported bus widths for writing:
+ * bit 0: DUAL (2-bit transfer), 1 for supported
+ * bit 1: QUAD (4-bit transfer), 1 for supported
+ * bit 2: OCTAL (8-bit transfer), 1 for supported
+ * other bits are reserved as 0, 1-bit transfer is always supported.
+ * @rx_nbits_supported: indicates the supported bus widths for reading:
+ * bit 0: DUAL (2-bit transfer), 1 for supported
+ * bit 1: QUAD (4-bit transfer), 1 for supported
+ * bit 2: OCTAL (8-bit transfer), 1 for supported
+ * other bits are reserved as 0, 1-bit transfer is always supported.
+ * @bits_per_word_mask: mask indicating which values of bits_per_word are
+ * supported. If not set, no limitation for bits_per_word.
+ * @mode_func_supported: indicates whether the following features are supported:
+ * bit 0-1: CPHA feature
+ * 0b00: invalid, at least one CPHA setting must be supported
+ * 0b01: supports CPHA=0 only
+ * 0b10: supports CPHA=1 only
+ * 0b11: supports CPHA=0 and CPHA=1.
+ * bit 2-3: CPOL feature
+ * 0b00: invalid, at least one CPOL setting must be supported
+ * 0b01: supports CPOL=0 only
+ * 0b10: supports CPOL=1 only
+ * 0b11: supports CPOL=0 and CPOL=1.
+ * bit 4: chipselect active high feature, 0 for unsupported and 1 for
+ * supported, chipselect active low is supported by default.
+ * bit 5: LSB first feature, 0 for unsupported and 1 for supported,
+ * MSB first is supported by default.
+ * bit 6: loopback mode feature, 0 for unsupported and 1 for supported,
+ * normal mode is supported by default.
+ * @max_freq_hz: the maximum clock rate supported in Hz unit, 0 means no
+ * limitation for transfer speed.
+ * @max_word_delay_ns: the maximum word delay supported, in nanoseconds.
+ * A value of 0 indicates that word delay is unsupported.
+ * Each transfer may consist of a sequence of words.
+ * @max_cs_setup_ns: the maximum delay supported after chipselect is asserted,
+ * in ns unit; 0 means no delay can be introduced after chipselect is
+ * asserted.
+ * @max_cs_hold_ns: the maximum delay supported before chipselect is deasserted,
+ * in ns unit; 0 means no delay can be introduced before chipselect is
+ * deasserted.
+ * @max_cs_inactive_ns: the maximum delay supported after chipselect is
+ * deasserted, in ns unit; 0 means no delay can be introduced after chipselect
+ * is deasserted.
+ */
+struct virtio_spi_config {
+ __u8 cs_max_number;
+ __u8 cs_change_supported;
+#define VIRTIO_SPI_RX_TX_SUPPORT_DUAL _BITUL(0)
+#define VIRTIO_SPI_RX_TX_SUPPORT_QUAD _BITUL(1)
+#define VIRTIO_SPI_RX_TX_SUPPORT_OCTAL _BITUL(2)
+ __u8 tx_nbits_supported;
+ __u8 rx_nbits_supported;
+ __le32 bits_per_word_mask;
+#define VIRTIO_SPI_MF_SUPPORT_CPHA_0 _BITUL(0)
+#define VIRTIO_SPI_MF_SUPPORT_CPHA_1 _BITUL(1)
+#define VIRTIO_SPI_MF_SUPPORT_CPOL_0 _BITUL(2)
+#define VIRTIO_SPI_MF_SUPPORT_CPOL_1 _BITUL(3)
+#define VIRTIO_SPI_MF_SUPPORT_CS_HIGH _BITUL(4)
+#define VIRTIO_SPI_MF_SUPPORT_LSB_FIRST _BITUL(5)
+#define VIRTIO_SPI_MF_SUPPORT_LOOPBACK _BITUL(6)
+ __le32 mode_func_supported;
+ __le32 max_freq_hz;
+ __le32 max_word_delay_ns;
+ __le32 max_cs_setup_ns;
+ __le32 max_cs_hold_ns;
+ __le32 max_cs_inactive_ns;
+};
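+
+/*
+ * Example (hypothetical device): cs_max_number = 2, tx_nbits_supported =
+ * VIRTIO_SPI_RX_TX_SUPPORT_DUAL and rx_nbits_supported = 0 describe a
+ * controller with two chipselects that supports single- and dual-bit
+ * writes but only single-bit reads.
+ */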
+
+/**
+ * struct spi_transfer_head - virtio SPI transfer descriptor
+ * @chip_select_id: chipselect index the SPI transfer used.
+ * @bits_per_word: the number of bits in each SPI transfer word.
+ * @cs_change: whether to deselect the device after finishing this transfer
+ * and before starting the next one; 0 means CS is kept asserted and
+ * 1 means CS is deasserted and then asserted again.
+ * @tx_nbits: bus width for write transfer.
+ * 0,1: bus width is 1, also known as SINGLE
+ * 2 : bus width is 2, also known as DUAL
+ * 4 : bus width is 4, also known as QUAD
+ * 8 : bus width is 8, also known as OCTAL
+ * other values are invalid.
+ * @rx_nbits: bus width for read transfer.
+ * 0,1: bus width is 1, also known as SINGLE
+ * 2 : bus width is 2, also known as DUAL
+ * 4 : bus width is 4, also known as QUAD
+ * 8 : bus width is 8, also known as OCTAL
+ * other values are invalid.
+ * @reserved: for future use.
+ * @mode: SPI transfer mode.
+ * bit 0: CPHA, determines the timing (i.e. phase) of the data
+ * bits relative to the clock pulses. For CPHA=0, the
+ * "out" side changes the data on the trailing edge of the
+ * preceding clock cycle, while the "in" side captures the data
+ * on (or shortly after) the leading edge of the clock cycle.
+ * For CPHA=1, the "out" side changes the data on the leading
+ * edge of the current clock cycle, while the "in" side
+ * captures the data on (or shortly after) the trailing edge of
+ * the clock cycle.
+ * bit 1: CPOL, determines the polarity of the clock. CPOL=0 is a
+ * clock which idles at 0, and each cycle consists of a pulse
+ * of 1. CPOL=1 is a clock which idles at 1, and each cycle
+ * consists of a pulse of 0.
+ * bit 2: CS_HIGH, if 1, chip select active high, else active low.
+ * bit 3: LSB_FIRST, determines per-word bits-on-wire, if 0, MSB
+ * first, else LSB first.
+ * bit 4: LOOP, loopback mode.
+ * @freq: the transfer speed in Hz.
+ * @word_delay_ns: delay to be inserted between consecutive words of a
+ * transfer, in ns unit.
+ * @cs_setup_ns: delay to be introduced after CS is asserted, in ns
+ * unit.
+ * @cs_delay_hold_ns: delay to be introduced before CS is deasserted
+ * for each transfer, in ns unit.
+ * @cs_change_delay_inactive_ns: delay to be introduced after CS is
+ * deasserted and before next asserted, in ns unit.
+ */
+struct spi_transfer_head {
+ __u8 chip_select_id;
+ __u8 bits_per_word;
+ __u8 cs_change;
+ __u8 tx_nbits;
+ __u8 rx_nbits;
+ __u8 reserved[3];
+ __le32 mode;
+ __le32 freq;
+ __le32 word_delay_ns;
+ __le32 cs_setup_ns;
+ __le32 cs_delay_hold_ns;
+ __le32 cs_change_delay_inactive_ns;
+};
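+
+/*
+ * Example (hypothetical transfer): an 8-bits-per-word, single-bit-wide,
+ * 1 MHz transfer on chipselect 0 in SPI mode 0 with no extra delays would
+ * set chip_select_id = 0, bits_per_word = 8, tx_nbits = rx_nbits = 1,
+ * mode = 0, freq = 1000000 (little-endian) and all *_ns delay fields to 0.
+ */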
+
+/**
+ * struct spi_transfer_result - virtio SPI transfer result
+ * @result: Transfer result code.
+ * VIRTIO_SPI_TRANS_OK: Transfer successful.
+ * VIRTIO_SPI_PARAM_ERR: Parameter error.
+ * VIRTIO_SPI_TRANS_ERR: Transfer error.
+ */
+struct spi_transfer_result {
+#define VIRTIO_SPI_TRANS_OK 0
+#define VIRTIO_SPI_PARAM_ERR 1
+#define VIRTIO_SPI_TRANS_ERR 2
+ __u8 result;
+};
+
+#endif /* #ifndef _LINUX_VIRTIO_VIRTIO_SPI_H */