diff --git a/Documentation/devicetree/bindings/misc/lwn,bk4-spi.yaml b/Documentation/devicetree/bindings/misc/lwn,bk4-spi.yaml new file mode 100644 index 000000000000..73fbf672e22a --- /dev/null +++ b/Documentation/devicetree/bindings/misc/lwn,bk4-spi.yaml @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/misc/lwn,bk4-spi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Liebherr's BK4 external SPI controller + +maintainers: + - Lukasz Majewski + +description: | + Liebherr's BK4 external SPI controller is a device which handles data + acquisition from compatible industrial peripherals. + The SPI is used for data and management purposes in both master and + slave modes. + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + const: lwn,bk4-spi + + reg: + maxItems: 1 + + spi-max-frequency: + maximum: 30000000 + + fsl,spi-cs-sck-delay: true + + fsl,spi-sck-cs-delay: true + +required: + - compatible + - spi-max-frequency + +additionalProperties: false + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + spidev@0 { + compatible = "lwn,bk4-spi"; + reg = <0>; + spi-max-frequency = <30000000>; + fsl,spi-cs-sck-delay = <200>; + fsl,spi-sck-cs-delay = <400>; + }; + }; diff --git a/Documentation/devicetree/bindings/misc/lwn-bk4.txt b/Documentation/devicetree/bindings/misc/lwn-bk4.txt deleted file mode 100644 index d6a8c188c087..000000000000 --- a/Documentation/devicetree/bindings/misc/lwn-bk4.txt +++ /dev/null @@ -1,26 +0,0 @@ -* Liebherr's BK4 controller external SPI - -A device which handles data acquisition from compatible industrial -peripherals. -The SPI is used for data and management purposes in both master and -slave modes. - -Required properties: - -- compatible : Should be "lwn,bk4" - -Required SPI properties: - -- reg : Should be address of the device chip select within - the controller. - -- spi-max-frequency : Maximum SPI clocking speed of device in Hz, should be - 30MHz at most for the Liebherr's BK4 external bus. - -Example: - -spidev0: spi@0 { - compatible = "lwn,bk4"; - spi-max-frequency = <30000000>; - reg = <0>; -}; diff --git a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml index d48ecd6cd5ad..b6bc71d19286 100644 --- a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml +++ b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml @@ -68,6 +68,7 @@ properties: - items: - enum: - amd,pensando-elba-qspi + - amd,versal2-ospi - intel,lgm-qspi - intel,socfpga-qspi - mobileye,eyeq5-ospi diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml index 093150c0cb87..82d051f7bd6e 100644 --- a/Documentation/devicetree/bindings/spi/spi-controller.yaml +++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml @@ -69,6 +69,11 @@ properties: Should be generally avoided and be replaced by spi-cs-high + ACTIVE_HIGH. + The simplest way to obtain an active-high CS signal is to configure the + controller's cs-gpio property with the ACTIVE_HIGH flag and set the + peripheral's spi-cs-high property. See example below for a better + understanding. 
+ fifo-depth: $ref: /schemas/types.yaml#/definitions/uint32 description: @@ -189,3 +194,23 @@ examples: stacked-memories = /bits/ 64 <0x10000000 0x10000000>; }; }; + + - | + #include + + spi@20204000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,bcm2835-spi"; + reg = <0x7e204000 0x1000>; + interrupts = <2 22>; + clocks = <&clk_spi>; + cs-gpios = <&gpio 8 GPIO_ACTIVE_HIGH>; + + display@0 { + compatible = "lg,lg4573"; + spi-max-frequency = <1000000>; + reg = <0>; + spi-cs-high; + }; + }; diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 45d7132e5f95..8af13c1c5110 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -1229,6 +1229,8 @@ spinand_select_op_variant(struct spinand_device *spinand, if (ret) break; + spi_mem_adjust_op_freq(spinand->spimem, &op); + if (!spi_mem_supports_op(spinand->spimem, &op)) break; diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f51f9466e518..ea8a31032927 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -542,6 +542,18 @@ config SPI_JCORE This enables support for the SPI master controller in the J-Core synthesizable, open source SoC. +config SPI_KSPI2 + tristate "Support for KEBA SPI master type 2 hardware" + depends on HAS_IOMEM + depends on KEBA_CP500 || COMPILE_TEST + select AUXILIARY_BUS + help + This driver supports KEBA SPI master type 2 FPGA implementation, + as found on CP500 devices for example. + + This driver can also be built as a module. If so, the module + will be called spi-kspi2. + config SPI_LM70_LLP tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)" depends on PARPORT diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index aea5e54de195..9db7554c1864 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -74,6 +74,7 @@ obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o obj-$(CONFIG_SPI_JCORE) += spi-jcore.o +obj-$(CONFIG_SPI_KSPI2) += spi-kspi2.o obj-$(CONFIG_SPI_LJCA) += spi-ljca.o obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o obj-$(CONFIG_SPI_LOONGSON_CORE) += spi-loongson-core.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 316bce577081..abdc49d9d940 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -11,11 +11,15 @@ * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale. 
*/ +#include #include #include +#include +#include #include #include #include +#include #include #include #include @@ -34,6 +38,7 @@ #define QSPI_IDR 0x0018 /* Interrupt Disable Register */ #define QSPI_IMR 0x001c /* Interrupt Mask Register */ #define QSPI_SCR 0x0020 /* Serial Clock Register */ +#define QSPI_SR2 0x0024 /* SAMA7G5 Status Register */ #define QSPI_IAR 0x0030 /* Instruction Address Register */ #define QSPI_ICR 0x0034 /* Instruction Code Register */ @@ -44,16 +49,32 @@ #define QSPI_SMR 0x0040 /* Scrambling Mode Register */ #define QSPI_SKR 0x0044 /* Scrambling Key Register */ +#define QSPI_REFRESH 0x0050 /* Refresh Register */ +#define QSPI_WRACNT 0x0054 /* Write Access Counter Register */ +#define QSPI_DLLCFG 0x0058 /* DLL Configuration Register */ +#define QSPI_PCALCFG 0x005C /* Pad Calibration Configuration Register */ +#define QSPI_PCALBP 0x0060 /* Pad Calibration Bypass Register */ +#define QSPI_TOUT 0x0064 /* Timeout Register */ + #define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */ #define QSPI_WPSR 0x00E8 /* Write Protection Status Register */ #define QSPI_VERSION 0x00FC /* Version Register */ +#define SAMA7G5_QSPI0_MAX_SPEED_HZ 200000000 +#define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ 133000000 /* Bitfields in QSPI_CR (Control Register) */ #define QSPI_CR_QSPIEN BIT(0) #define QSPI_CR_QSPIDIS BIT(1) +#define QSPI_CR_DLLON BIT(2) +#define QSPI_CR_DLLOFF BIT(3) +#define QSPI_CR_STPCAL BIT(4) +#define QSPI_CR_SRFRSH BIT(5) #define QSPI_CR_SWRST BIT(7) +#define QSPI_CR_UPDCFG BIT(8) +#define QSPI_CR_STTFR BIT(9) +#define QSPI_CR_RTOUT BIT(10) #define QSPI_CR_LASTXFER BIT(24) /* Bitfields in QSPI_MR (Mode Register) */ @@ -61,12 +82,14 @@ #define QSPI_MR_LLB BIT(1) #define QSPI_MR_WDRBT BIT(2) #define QSPI_MR_SMRM BIT(3) +#define QSPI_MR_DQSDLYEN BIT(3) #define QSPI_MR_CSMODE_MASK GENMASK(5, 4) #define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4) #define QSPI_MR_CSMODE_LASTXFER (1 << 4) #define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4) #define QSPI_MR_NBBITS_MASK GENMASK(11, 8) #define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK) +#define QSPI_MR_OENSD BIT(15) #define QSPI_MR_DLYBCT_MASK GENMASK(23, 16) #define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK) #define QSPI_MR_DLYCS_MASK GENMASK(31, 24) @@ -80,6 +103,13 @@ #define QSPI_SR_CSR BIT(8) #define QSPI_SR_CSS BIT(9) #define QSPI_SR_INSTRE BIT(10) +#define QSPI_SR_LWRA BIT(11) +#define QSPI_SR_QITF BIT(12) +#define QSPI_SR_QITR BIT(13) +#define QSPI_SR_CSFA BIT(14) +#define QSPI_SR_CSRA BIT(15) +#define QSPI_SR_RFRSHD BIT(16) +#define QSPI_SR_TOUT BIT(17) #define QSPI_SR_QSPIENS BIT(24) #define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR) @@ -92,9 +122,22 @@ #define QSPI_SCR_DLYBS_MASK GENMASK(23, 16) #define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK) +/* Bitfields in QSPI_SR2 (SAMA7G5 Status Register) */ +#define QSPI_SR2_SYNCBSY BIT(0) +#define QSPI_SR2_QSPIENS BIT(1) +#define QSPI_SR2_CSS BIT(2) +#define QSPI_SR2_RBUSY BIT(3) +#define QSPI_SR2_HIDLE BIT(4) +#define QSPI_SR2_DLOCK BIT(5) +#define QSPI_SR2_CALBSY BIT(6) + +/* Bitfields in QSPI_IAR (Instruction Address Register) */ +#define QSPI_IAR_ADDR GENMASK(31, 0) + /* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */ #define QSPI_ICR_INST_MASK GENMASK(7, 0) #define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK) +#define QSPI_ICR_INST_MASK_SAMA7G5 GENMASK(15, 0) #define QSPI_ICR_OPT_MASK GENMASK(23, 16) #define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK) @@ -107,6 +150,9 @@ #define 
QSPI_IFR_WIDTH_QUAD_IO (4 << 0) #define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0) #define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0) +#define QSPI_IFR_WIDTH_OCT_OUTPUT (7 << 0) +#define QSPI_IFR_WIDTH_OCT_IO (8 << 0) +#define QSPI_IFR_WIDTH_OCT_CMD (9 << 0) #define QSPI_IFR_INSTEN BIT(4) #define QSPI_IFR_ADDREN BIT(5) #define QSPI_IFR_OPTEN BIT(6) @@ -117,19 +163,60 @@ #define QSPI_IFR_OPTL_4BIT (2 << 8) #define QSPI_IFR_OPTL_8BIT (3 << 8) #define QSPI_IFR_ADDRL BIT(10) +#define QSPI_IFR_ADDRL_SAMA7G5 GENMASK(11, 10) #define QSPI_IFR_TFRTYP_MEM BIT(12) #define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13) #define QSPI_IFR_CRM BIT(14) +#define QSPI_IFR_DDREN BIT(15) #define QSPI_IFR_NBDUM_MASK GENMASK(20, 16) #define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK) +#define QSPI_IFR_END BIT(22) +#define QSPI_IFR_SMRM BIT(23) #define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */ +#define QSPI_IFR_DQSEN BIT(25) +#define QSPI_IFR_DDRCMDEN BIT(26) +#define QSPI_IFR_HFWBEN BIT(27) +#define QSPI_IFR_PROTTYP GENMASK(29, 28) +#define QSPI_IFR_PROTTYP_STD_SPI 0 +#define QSPI_IFR_PROTTYP_TWIN_QUAD 1 +#define QSPI_IFR_PROTTYP_OCTAFLASH 2 +#define QSPI_IFR_PROTTYP_HYPERFLASH 3 /* Bitfields in QSPI_SMR (Scrambling Mode Register) */ #define QSPI_SMR_SCREN BIT(0) #define QSPI_SMR_RVDIS BIT(1) +#define QSPI_SMR_SCRKL BIT(2) + +/* Bitfields in QSPI_REFRESH (Refresh Register) */ +#define QSPI_REFRESH_DELAY_COUNTER GENMASK(31, 0) + +/* Bitfields in QSPI_WRACNT (Write Access Counter Register) */ +#define QSPI_WRACNT_NBWRA GENMASK(31, 0) + +/* Bitfields in QSPI_DLLCFG (DLL Configuration Register) */ +#define QSPI_DLLCFG_RANGE BIT(0) + +/* Bitfields in QSPI_PCALCFG (DLL Pad Calibration Configuration Register) */ +#define QSPI_PCALCFG_AAON BIT(0) +#define QSPI_PCALCFG_DAPCAL BIT(1) +#define QSPI_PCALCFG_DIFFPM BIT(2) +#define QSPI_PCALCFG_CLKDIV GENMASK(6, 4) +#define QSPI_PCALCFG_CALCNT GENMASK(16, 8) +#define QSPI_PCALCFG_CALP GENMASK(27, 24) +#define QSPI_PCALCFG_CALN GENMASK(31, 28) + +/* Bitfields in QSPI_PCALBP (DLL Pad Calibration Bypass Register) */ +#define QSPI_PCALBP_BPEN BIT(0) +#define QSPI_PCALBP_CALPBP GENMASK(11, 8) +#define QSPI_PCALBP_CALNBP GENMASK(19, 16) + +/* Bitfields in QSPI_TOUT (Timeout Register) */ +#define QSPI_TOUT_TCNTM GENMASK(15, 0) /* Bitfields in QSPI_WPMR (Write Protection Mode Register) */ #define QSPI_WPMR_WPEN BIT(0) +#define QSPI_WPMR_WPITEN BIT(1) +#define QSPI_WPMR_WPCREN BIT(2) #define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8) #define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK) @@ -138,23 +225,74 @@ #define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8) #define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC) -struct atmel_qspi_caps { - bool has_qspick; - bool has_ricr; +#define ATMEL_QSPI_TIMEOUT 1000 /* ms */ +#define ATMEL_QSPI_SYNC_TIMEOUT 300 /* ms */ +#define QSPI_DLLCFG_THRESHOLD_FREQ 90000000U +#define QSPI_CALIB_TIME 2000 /* 2 us */ + +/* Use PIO for small transfers. */ +#define ATMEL_QSPI_DMA_MIN_BYTES 16 +/** + * struct atmel_qspi_pcal - Pad Calibration Clock Division + * @pclk_rate: peripheral clock rate. + * @pclkdiv: calibration clock division. The clock applied to the calibration + * cell is divided by pclkdiv + 1. 
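+ *
+ * Illustrative note (derived from the pcal[] table below, not a datasheet
+ * quote): with pclk_rate = 200 MHz the table selects a division value of 7,
+ * so the calibration cell is clocked at 200 MHz / (7 + 1) = 25 MHz; every
+ * table entry targets the same ~25 MHz calibration clock.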
+ */ +struct atmel_qspi_pcal { + u32 pclk_rate; + u8 pclk_div; }; +#define ATMEL_QSPI_PCAL_ARRAY_SIZE 8 +static const struct atmel_qspi_pcal pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE] = { + {25000000, 0}, + {50000000, 1}, + {75000000, 2}, + {100000000, 3}, + {125000000, 4}, + {150000000, 5}, + {175000000, 6}, + {200000000, 7}, +}; + +struct atmel_qspi_caps { + u32 max_speed_hz; + bool has_qspick; + bool has_gclk; + bool has_ricr; + bool octal; + bool has_dma; +}; + +struct atmel_qspi_ops; + struct atmel_qspi { void __iomem *regs; void __iomem *mem; struct clk *pclk; struct clk *qspick; + struct clk *gclk; struct platform_device *pdev; const struct atmel_qspi_caps *caps; + const struct atmel_qspi_ops *ops; resource_size_t mmap_size; u32 pending; + u32 irq_mask; u32 mr; u32 scr; + u32 target_max_speed_hz; struct completion cmd_completion; + struct completion dma_completion; + dma_addr_t mmap_phys_base; + struct dma_chan *rx_chan; + struct dma_chan *tx_chan; +}; + +struct atmel_qspi_ops { + int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op, + u32 *offset); + int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op, + u32 offset); }; struct atmel_qspi_mode { @@ -174,6 +312,19 @@ static const struct atmel_qspi_mode atmel_qspi_modes[] = { { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD }, }; +static const struct atmel_qspi_mode atmel_qspi_sama7g5_modes[] = { + { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI }, + { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT }, + { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT }, + { 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO }, + { 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO }, + { 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD }, + { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD }, + { 1, 1, 8, QSPI_IFR_WIDTH_OCT_OUTPUT }, + { 1, 8, 8, QSPI_IFR_WIDTH_OCT_IO }, + { 8, 8, 8, QSPI_IFR_WIDTH_OCT_CMD }, +}; + #ifdef VERBOSE_DEBUG static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz) { @@ -196,6 +347,8 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz) return "IMR"; case QSPI_SCR: return "SCR"; + case QSPI_SR2: + return "SR2"; case QSPI_IAR: return "IAR"; case QSPI_ICR: @@ -208,6 +361,18 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz) return "SMR"; case QSPI_SKR: return "SKR"; + case QSPI_REFRESH: + return "REFRESH"; + case QSPI_WRACNT: + return "WRACNT"; + case QSPI_DLLCFG: + return "DLLCFG"; + case QSPI_PCALCFG: + return "PCALCFG"; + case QSPI_PCALBP: + return "PCALBP"; + case QSPI_TOUT: + return "TOUT"; case QSPI_WPMR: return "WPMR"; case QSPI_WPSR: @@ -249,6 +414,28 @@ static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset) writel_relaxed(value, aq->regs + offset); } +static int atmel_qspi_reg_sync(struct atmel_qspi *aq) +{ + u32 val; + int ret; + + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + !(val & QSPI_SR2_SYNCBSY), 40, + ATMEL_QSPI_SYNC_TIMEOUT); + return ret; +} + +static int atmel_qspi_update_config(struct atmel_qspi *aq) +{ + int ret; + + ret = atmel_qspi_reg_sync(aq); + if (ret) + return ret; + atmel_qspi_write(QSPI_CR_UPDCFG, aq, QSPI_CR); + return atmel_qspi_reg_sync(aq); +} + static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op, const struct atmel_qspi_mode *mode) { @@ -275,12 +462,31 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op) return -EOPNOTSUPP; } +static int atmel_qspi_sama7g5_find_mode(const struct spi_mem_op *op) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(atmel_qspi_sama7g5_modes); i++) + if (atmel_qspi_is_compatible(op, &atmel_qspi_sama7g5_modes[i])) + return i; + + return 
-EOPNOTSUPP; +} + static bool atmel_qspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { + struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller); if (!spi_mem_default_supports_op(mem, op)) return false; + if (aq->caps->octal) { + if (atmel_qspi_sama7g5_find_mode(op) < 0) + return false; + else + return true; + } + if (atmel_qspi_find_mode(op) < 0) return false; @@ -292,6 +498,25 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem, return true; } +/* + * If the QSPI controller is set in regular SPI mode, set it in + * Serial Memory Mode (SMM). + */ +static int atmel_qspi_set_serial_memory_mode(struct atmel_qspi *aq) +{ + int ret = 0; + + if (!(aq->mr & QSPI_MR_SMM)) { + aq->mr |= QSPI_MR_SMM; + atmel_qspi_write(aq->mr, aq, QSPI_MR); + + if (aq->caps->has_gclk) + ret = atmel_qspi_update_config(aq); + } + + return ret; +} + static int atmel_qspi_set_cfg(struct atmel_qspi *aq, const struct spi_mem_op *op, u32 *offset) { @@ -371,14 +596,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, ifr |= QSPI_IFR_TFRTYP_MEM; } - /* - * If the QSPI controller is set in regular SPI mode, set it in - * Serial Memory Mode (SMM). - */ - if (!(aq->mr & QSPI_MR_SMM)) { - aq->mr |= QSPI_MR_SMM; - atmel_qspi_write(aq->mr, aq, QSPI_MR); - } + mode = atmel_qspi_set_serial_memory_mode(aq); + if (mode < 0) + return mode; /* Clear pending interrupts */ (void)atmel_qspi_read(aq, QSPI_SR); @@ -404,10 +624,326 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, return 0; } +static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask) +{ + int err = 0; + u32 sr; + + /* Poll INSTRuction End status */ + sr = atmel_qspi_read(aq, QSPI_SR); + if ((sr & irq_mask) == irq_mask) + return 0; + + /* Wait for INSTRuction End interrupt */ + reinit_completion(&aq->cmd_completion); + aq->pending = sr & irq_mask; + aq->irq_mask = irq_mask; + atmel_qspi_write(irq_mask, aq, QSPI_IER); + if (!wait_for_completion_timeout(&aq->cmd_completion, + msecs_to_jiffies(ATMEL_QSPI_TIMEOUT))) + err = -ETIMEDOUT; + atmel_qspi_write(irq_mask, aq, QSPI_IDR); + + return err; +} + +static int atmel_qspi_transfer(struct spi_mem *mem, + const struct spi_mem_op *op, u32 offset) +{ + struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller); + + /* Skip to the final steps if there is no data */ + if (!op->data.nbytes) + return atmel_qspi_wait_for_completion(aq, + QSPI_SR_CMD_COMPLETED); + + /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ + (void)atmel_qspi_read(aq, QSPI_IFR); + + /* Send/Receive data */ + if (op->data.dir == SPI_MEM_DATA_IN) { + memcpy_fromio(op->data.buf.in, aq->mem + offset, + op->data.nbytes); + + /* Synchronize AHB and APB accesses again */ + rmb(); + } else { + memcpy_toio(aq->mem + offset, op->data.buf.out, + op->data.nbytes); + + /* Synchronize AHB and APB accesses again */ + wmb(); + } + + /* Release the chip-select */ + atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR); + + return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED); +} + +static int atmel_qspi_sama7g5_set_cfg(struct atmel_qspi *aq, + const struct spi_mem_op *op, u32 *offset) +{ + u32 iar, icr, ifr; + int mode, ret; + + iar = 0; + icr = FIELD_PREP(QSPI_ICR_INST_MASK_SAMA7G5, op->cmd.opcode); + ifr = QSPI_IFR_INSTEN; + + mode = atmel_qspi_sama7g5_find_mode(op); + if (mode < 0) + return mode; + ifr |= atmel_qspi_sama7g5_modes[mode].config; + + if (op->dummy.buswidth && op->dummy.nbytes) { + if (op->addr.dtr && op->dummy.dtr && op->data.dtr) + ifr |= 
QSPI_IFR_NBDUM(op->dummy.nbytes * 8 / + (2 * op->dummy.buswidth)); + else + ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 / + op->dummy.buswidth); + } + + if (op->addr.buswidth && op->addr.nbytes) { + ifr |= FIELD_PREP(QSPI_IFR_ADDRL_SAMA7G5, op->addr.nbytes - 1) | + QSPI_IFR_ADDREN; + iar = FIELD_PREP(QSPI_IAR_ADDR, op->addr.val); + } + + if (op->addr.dtr && op->dummy.dtr && op->data.dtr) { + ifr |= QSPI_IFR_DDREN; + if (op->cmd.dtr) + ifr |= QSPI_IFR_DDRCMDEN; + + ifr |= QSPI_IFR_DQSEN; + } + + if (op->cmd.buswidth == 8 || op->addr.buswidth == 8 || + op->data.buswidth == 8) + ifr |= FIELD_PREP(QSPI_IFR_PROTTYP, QSPI_IFR_PROTTYP_OCTAFLASH); + + /* offset of the data access in the QSPI memory space */ + *offset = iar; + + /* Set data enable */ + if (op->data.nbytes) { + ifr |= QSPI_IFR_DATAEN; + + if (op->addr.nbytes) + ifr |= QSPI_IFR_TFRTYP_MEM; + } + + ret = atmel_qspi_set_serial_memory_mode(aq); + if (ret < 0) + return ret; + + /* Clear pending interrupts */ + (void)atmel_qspi_read(aq, QSPI_SR); + + /* Set QSPI Instruction Frame registers */ + if (op->addr.nbytes && !op->data.nbytes) + atmel_qspi_write(iar, aq, QSPI_IAR); + + if (op->data.dir == SPI_MEM_DATA_IN) { + atmel_qspi_write(icr, aq, QSPI_RICR); + } else { + atmel_qspi_write(icr, aq, QSPI_WICR); + if (op->data.nbytes) + atmel_qspi_write(FIELD_PREP(QSPI_WRACNT_NBWRA, + op->data.nbytes), + aq, QSPI_WRACNT); + } + + atmel_qspi_write(ifr, aq, QSPI_IFR); + + return atmel_qspi_update_config(aq); +} + +static void atmel_qspi_dma_callback(void *param) +{ + struct atmel_qspi *aq = param; + + complete(&aq->dma_completion); +} + +static int atmel_qspi_dma_xfer(struct atmel_qspi *aq, struct dma_chan *chan, + dma_addr_t dma_dst, dma_addr_t dma_src, + unsigned int len) +{ + struct dma_async_tx_descriptor *tx; + dma_cookie_t cookie; + int ret; + + tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) { + dev_err(&aq->pdev->dev, "device_prep_dma_memcpy error\n"); + return -EIO; + } + + reinit_completion(&aq->dma_completion); + tx->callback = atmel_qspi_dma_callback; + tx->callback_param = aq; + cookie = tx->tx_submit(tx); + ret = dma_submit_error(cookie); + if (ret) { + dev_err(&aq->pdev->dev, "dma_submit_error %d\n", cookie); + return ret; + } + + dma_async_issue_pending(chan); + ret = wait_for_completion_timeout(&aq->dma_completion, + msecs_to_jiffies(20 * ATMEL_QSPI_TIMEOUT)); + if (ret == 0) { + dmaengine_terminate_sync(chan); + dev_err(&aq->pdev->dev, "DMA wait_for_completion_timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int atmel_qspi_dma_rx_xfer(struct spi_mem *mem, + const struct spi_mem_op *op, + struct sg_table *sgt, loff_t loff) +{ + struct atmel_qspi *aq = + spi_controller_get_devdata(mem->spi->controller); + struct scatterlist *sg; + dma_addr_t dma_src; + unsigned int i, len; + int ret; + + dma_src = aq->mmap_phys_base + loff; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + len = sg_dma_len(sg); + ret = atmel_qspi_dma_xfer(aq, aq->rx_chan, sg_dma_address(sg), + dma_src, len); + if (ret) + return ret; + dma_src += len; + } + + return 0; +} + +static int atmel_qspi_dma_tx_xfer(struct spi_mem *mem, + const struct spi_mem_op *op, + struct sg_table *sgt, loff_t loff) +{ + struct atmel_qspi *aq = + spi_controller_get_devdata(mem->spi->controller); + struct scatterlist *sg; + dma_addr_t dma_dst; + unsigned int i, len; + int ret; + + dma_dst = aq->mmap_phys_base + loff; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + len = sg_dma_len(sg); + ret = 
atmel_qspi_dma_xfer(aq, aq->tx_chan, dma_dst, + sg_dma_address(sg), len); + if (ret) + return ret; + dma_dst += len; + } + + return 0; +} + +static int atmel_qspi_dma_transfer(struct spi_mem *mem, + const struct spi_mem_op *op, loff_t loff) +{ + struct sg_table sgt; + int ret; + + ret = spi_controller_dma_map_mem_op_data(mem->spi->controller, op, + &sgt); + if (ret) + return ret; + + if (op->data.dir == SPI_MEM_DATA_IN) + ret = atmel_qspi_dma_rx_xfer(mem, op, &sgt, loff); + else + ret = atmel_qspi_dma_tx_xfer(mem, op, &sgt, loff); + + spi_controller_dma_unmap_mem_op_data(mem->spi->controller, op, &sgt); + + return ret; +} + +static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem, + const struct spi_mem_op *op, u32 offset) +{ + struct atmel_qspi *aq = + spi_controller_get_devdata(mem->spi->controller); + u32 val; + int ret; + + if (!op->data.nbytes) { + /* Start the transfer. */ + ret = atmel_qspi_reg_sync(aq); + if (ret) + return ret; + atmel_qspi_write(QSPI_CR_STTFR, aq, QSPI_CR); + + return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA); + } + + /* Send/Receive data. */ + if (op->data.dir == SPI_MEM_DATA_IN) { + if (aq->rx_chan && op->addr.nbytes && + op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) { + ret = atmel_qspi_dma_transfer(mem, op, offset); + if (ret) + return ret; + } else { + memcpy_fromio(op->data.buf.in, aq->mem + offset, + op->data.nbytes); + } + + if (op->addr.nbytes) { + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + !(val & QSPI_SR2_RBUSY), 40, + ATMEL_QSPI_SYNC_TIMEOUT); + if (ret) + return ret; + } + } else { + if (aq->tx_chan && op->addr.nbytes && + op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) { + ret = atmel_qspi_dma_transfer(mem, op, offset); + if (ret) + return ret; + } else { + memcpy_toio(aq->mem + offset, op->data.buf.out, + op->data.nbytes); + } + + ret = atmel_qspi_wait_for_completion(aq, QSPI_SR_LWRA); + if (ret) + return ret; + } + + /* Release the chip-select. */ + ret = atmel_qspi_reg_sync(aq); + if (ret) { + pm_runtime_mark_last_busy(&aq->pdev->dev); + pm_runtime_put_autosuspend(&aq->pdev->dev); + return ret; + } + atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR); + + return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA); +} + static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller); - u32 sr, offset; + u32 offset; int err; /* @@ -416,46 +952,20 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) * when the flash memories overrun the controller's memory space. 
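 * (Illustrative: an access whose addr.val plus data.nbytes crosses the end
 * of the memory-mapped window, i.e. exceeds mmap_size, is rejected right
 * below with -EOPNOTSUPP rather than being silently truncated.)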
*/ if (op->addr.val + op->data.nbytes > aq->mmap_size) - return -ENOTSUPP; + return -EOPNOTSUPP; + + if (op->addr.nbytes > 4) + return -EOPNOTSUPP; err = pm_runtime_resume_and_get(&aq->pdev->dev); if (err < 0) return err; - err = atmel_qspi_set_cfg(aq, op, &offset); + err = aq->ops->set_cfg(aq, op, &offset); if (err) goto pm_runtime_put; - /* Skip to the final steps if there is no data */ - if (op->data.nbytes) { - /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ - (void)atmel_qspi_read(aq, QSPI_IFR); - - /* Send/Receive data */ - if (op->data.dir == SPI_MEM_DATA_IN) - memcpy_fromio(op->data.buf.in, aq->mem + offset, - op->data.nbytes); - else - memcpy_toio(aq->mem + offset, op->data.buf.out, - op->data.nbytes); - - /* Release the chip-select */ - atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR); - } - - /* Poll INSTRuction End status */ - sr = atmel_qspi_read(aq, QSPI_SR); - if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) - goto pm_runtime_put; - - /* Wait for INSTRuction End interrupt */ - reinit_completion(&aq->cmd_completion); - aq->pending = sr & QSPI_SR_CMD_COMPLETED; - atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER); - if (!wait_for_completion_timeout(&aq->cmd_completion, - msecs_to_jiffies(1000))) - err = -ETIMEDOUT; - atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR); + err = aq->ops->transfer(mem, op, offset); pm_runtime_put: pm_runtime_mark_last_busy(&aq->pdev->dev); @@ -474,6 +984,159 @@ static const struct spi_controller_mem_ops atmel_qspi_mem_ops = { .get_name = atmel_qspi_get_name }; +static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq) +{ + unsigned long pclk_rate; + u32 status, val; + int i, ret; + u8 pclk_div = 0; + + pclk_rate = clk_get_rate(aq->pclk); + if (!pclk_rate) + return -EINVAL; + + for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) { + if (pclk_rate <= pcal[i].pclk_rate) { + pclk_div = pcal[i].pclk_div; + break; + } + } + + /* + * Use the biggest divider in case the peripheral clock exceeds + * 200MHZ. + */ + if (pclk_rate > pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_rate) + pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div; + + /* Disable QSPI while configuring the pad calibration. */ + status = atmel_qspi_read(aq, QSPI_SR2); + if (status & QSPI_SR2_QSPIENS) { + ret = atmel_qspi_reg_sync(aq); + if (ret) + return ret; + atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); + } + + /* + * The analog circuitry is not shut down at the end of the calibration + * and the start-up time is only required for the first calibration + * sequence, thus increasing performance. Set the delay between the Pad + * calibration analog circuitry and the calibration request to 2us. + */ + atmel_qspi_write(QSPI_PCALCFG_AAON | + FIELD_PREP(QSPI_PCALCFG_CLKDIV, pclk_div) | + FIELD_PREP(QSPI_PCALCFG_CALCNT, + 2 * (pclk_rate / 1000000)), + aq, QSPI_PCALCFG); + + /* DLL On + start calibration. */ + atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR); + + /* Check synchronization status before updating configuration. 
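+ * (Reading of the QSPI_SR2 bit names, not a datasheet quote: the poll below
+ * waits for the DLL to lock (DLOCK set) and for the pad calibration to
+ * finish (CALBSY cleared) before the refresh period is programmed.)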
*/ + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + (val & QSPI_SR2_DLOCK) && + !(val & QSPI_SR2_CALBSY), 40, + ATMEL_QSPI_TIMEOUT); + + /* Refresh analogic blocks every 1 ms.*/ + atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER, + aq->target_max_speed_hz / 1000), + aq, QSPI_REFRESH); + + return ret; +} + +static int atmel_qspi_set_gclk(struct atmel_qspi *aq) +{ + u32 status, val; + int ret; + + /* Disable DLL before setting GCLK */ + status = atmel_qspi_read(aq, QSPI_SR2); + if (status & QSPI_SR2_DLOCK) { + atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR); + + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + !(val & QSPI_SR2_DLOCK), 40, + ATMEL_QSPI_TIMEOUT); + if (ret) + return ret; + } + + if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ) + atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG); + else + atmel_qspi_write(0, aq, QSPI_DLLCFG); + + ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz); + if (ret) { + dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n"); + return ret; + } + + /* Enable the QSPI generic clock */ + ret = clk_prepare_enable(aq->gclk); + if (ret) + dev_err(&aq->pdev->dev, "Failed to enable generic clock.\n"); + + return ret; +} + +static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq) +{ + u32 val; + int ret; + + ret = atmel_qspi_set_gclk(aq); + if (ret) + return ret; + + if (aq->caps->octal) { + ret = atmel_qspi_set_pad_calibration(aq); + if (ret) + return ret; + } else { + atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR); + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + (val & QSPI_SR2_DLOCK), 40, + ATMEL_QSPI_TIMEOUT); + } + + /* Set the QSPI controller by default in Serial Memory Mode */ + aq->mr |= QSPI_MR_DQSDLYEN; + ret = atmel_qspi_set_serial_memory_mode(aq); + if (ret < 0) + return ret; + + /* Enable the QSPI controller. */ + atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR); + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + val & QSPI_SR2_QSPIENS, 40, + ATMEL_QSPI_SYNC_TIMEOUT); + if (ret) + return ret; + + if (aq->caps->octal) { + ret = readl_poll_timeout(aq->regs + QSPI_SR, val, + val & QSPI_SR_RFRSHD, 40, + ATMEL_QSPI_TIMEOUT); + } + + atmel_qspi_write(QSPI_TOUT_TCNTM, aq, QSPI_TOUT); + return ret; +} + +static int atmel_qspi_sama7g5_setup(struct spi_device *spi) +{ + struct atmel_qspi *aq = spi_controller_get_devdata(spi->controller); + + /* The controller can communicate with a single peripheral device (target). 
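+ * Its max_speed_hz is therefore cached here and later programmed as the
+ * generic clock rate in atmel_qspi_set_gclk().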
*/ + aq->target_max_speed_hz = spi->max_speed_hz; + + return atmel_qspi_sama7g5_init(aq); +} + static int atmel_qspi_setup(struct spi_device *spi) { struct spi_controller *ctrl = spi->controller; @@ -488,6 +1151,9 @@ static int atmel_qspi_setup(struct spi_device *spi) if (!spi->max_speed_hz) return -EINVAL; + if (aq->caps->has_gclk) + return atmel_qspi_sama7g5_setup(spi); + src_rate = clk_get_rate(aq->pclk); if (!src_rate) return -EINVAL; @@ -573,17 +1239,29 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi) return 0; } -static void atmel_qspi_init(struct atmel_qspi *aq) +static int atmel_qspi_init(struct atmel_qspi *aq) { + int ret; + + if (aq->caps->has_gclk) { + ret = atmel_qspi_reg_sync(aq); + if (ret) + return ret; + atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR); + return 0; + } + /* Reset the QSPI controller */ atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR); /* Set the QSPI controller by default in Serial Memory Mode */ - aq->mr |= QSPI_MR_SMM; - atmel_qspi_write(aq->mr, aq, QSPI_MR); + ret = atmel_qspi_set_serial_memory_mode(aq); + if (ret < 0) + return ret; /* Enable the QSPI controller */ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR); + return 0; } static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) @@ -599,12 +1277,65 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) return IRQ_NONE; aq->pending |= pending; - if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) + if ((aq->pending & aq->irq_mask) == aq->irq_mask) complete(&aq->cmd_completion); return IRQ_HANDLED; } +static int atmel_qspi_dma_init(struct spi_controller *ctrl) +{ + struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + int ret; + + aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx"); + if (IS_ERR(aq->rx_chan)) { + aq->rx_chan = NULL; + return dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan), + "RX DMA channel is not available\n"); + } + + aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx"); + if (IS_ERR(aq->tx_chan)) { + ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan), + "TX DMA channel is not available\n"); + goto release_rx_chan; + } + + ctrl->dma_rx = aq->rx_chan; + ctrl->dma_tx = aq->tx_chan; + init_completion(&aq->dma_completion); + + dev_info(&aq->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n", + dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan)); + + return 0; + +release_rx_chan: + dma_release_channel(aq->rx_chan); + aq->rx_chan = NULL; + aq->tx_chan = NULL; + return ret; +} + +static void atmel_qspi_dma_release(struct atmel_qspi *aq) +{ + if (aq->rx_chan) + dma_release_channel(aq->rx_chan); + if (aq->tx_chan) + dma_release_channel(aq->tx_chan); +} + +static const struct atmel_qspi_ops atmel_qspi_ops = { + .set_cfg = atmel_qspi_set_cfg, + .transfer = atmel_qspi_transfer, +}; + +static const struct atmel_qspi_ops atmel_qspi_sama7g5_ops = { + .set_cfg = atmel_qspi_sama7g5_set_cfg, + .transfer = atmel_qspi_sama7g5_transfer, +}; + static int atmel_qspi_probe(struct platform_device *pdev) { struct spi_controller *ctrl; @@ -616,7 +1347,27 @@ static int atmel_qspi_probe(struct platform_device *pdev) if (!ctrl) return -ENOMEM; + aq = spi_controller_get_devdata(ctrl); + + aq->caps = of_device_get_match_data(&pdev->dev); + if (!aq->caps) { + dev_err(&pdev->dev, "Could not retrieve QSPI caps\n"); + return -EINVAL; + } + + init_completion(&aq->cmd_completion); + aq->pdev = pdev; + ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; + if (aq->caps->octal) + ctrl->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL; + + if 
(aq->caps->has_gclk) + aq->ops = &atmel_qspi_sama7g5_ops; + else + aq->ops = &atmel_qspi_ops; + + ctrl->max_speed_hz = aq->caps->max_speed_hz; ctrl->setup = atmel_qspi_setup; ctrl->set_cs_timing = atmel_qspi_set_cs_timing; ctrl->bus_num = -1; @@ -625,11 +1376,6 @@ static int atmel_qspi_probe(struct platform_device *pdev) ctrl->dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, ctrl); - aq = spi_controller_get_devdata(ctrl); - - init_completion(&aq->cmd_completion); - aq->pdev = pdev; - /* Map the registers */ aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base"); if (IS_ERR(aq->regs)) @@ -644,57 +1390,52 @@ static int atmel_qspi_probe(struct platform_device *pdev) "missing AHB memory\n"); aq->mmap_size = resource_size(res); + aq->mmap_phys_base = (dma_addr_t)res->start; /* Get the peripheral clock */ - aq->pclk = devm_clk_get(&pdev->dev, "pclk"); + aq->pclk = devm_clk_get_enabled(&pdev->dev, "pclk"); if (IS_ERR(aq->pclk)) - aq->pclk = devm_clk_get(&pdev->dev, NULL); + aq->pclk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(aq->pclk)) return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk), "missing peripheral clock\n"); - /* Enable the peripheral clock */ - err = clk_prepare_enable(aq->pclk); - if (err) - return dev_err_probe(&pdev->dev, err, - "failed to enable the peripheral clock\n"); - - aq->caps = of_device_get_match_data(&pdev->dev); - if (!aq->caps) { - dev_err(&pdev->dev, "Could not retrieve QSPI caps\n"); - err = -EINVAL; - goto disable_pclk; - } - if (aq->caps->has_qspick) { /* Get the QSPI system clock */ - aq->qspick = devm_clk_get(&pdev->dev, "qspick"); + aq->qspick = devm_clk_get_enabled(&pdev->dev, "qspick"); if (IS_ERR(aq->qspick)) { dev_err(&pdev->dev, "missing system clock\n"); err = PTR_ERR(aq->qspick); - goto disable_pclk; + return err; } - /* Enable the QSPI system clock */ - err = clk_prepare_enable(aq->qspick); - if (err) { - dev_err(&pdev->dev, - "failed to enable the QSPI system clock\n"); - goto disable_pclk; + } else if (aq->caps->has_gclk) { + /* Get the QSPI generic clock */ + aq->gclk = devm_clk_get(&pdev->dev, "gclk"); + if (IS_ERR(aq->gclk)) { + dev_err(&pdev->dev, "missing Generic clock\n"); + err = PTR_ERR(aq->gclk); + return err; } } + if (aq->caps->has_dma) { + err = atmel_qspi_dma_init(ctrl); + if (err == -EPROBE_DEFER) + return err; + } + /* Request the IRQ */ irq = platform_get_irq(pdev, 0); if (irq < 0) { err = irq; - goto disable_qspick; + goto dma_release; } err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt, 0, dev_name(&pdev->dev), aq); if (err) - goto disable_qspick; + goto dma_release; pm_runtime_set_autosuspend_delay(&pdev->dev, 500); pm_runtime_use_autosuspend(&pdev->dev); @@ -702,7 +1443,9 @@ static int atmel_qspi_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); - atmel_qspi_init(aq); + err = atmel_qspi_init(aq); + if (err) + goto dma_release; err = spi_register_controller(ctrl); if (err) { @@ -710,21 +1453,57 @@ static int atmel_qspi_probe(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); - goto disable_qspick; + goto dma_release; } pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; -disable_qspick: - clk_disable_unprepare(aq->qspick); -disable_pclk: - clk_disable_unprepare(aq->pclk); +dma_release: + if (aq->caps->has_dma) + atmel_qspi_dma_release(aq); return err; } +static int atmel_qspi_sama7g5_suspend(struct atmel_qspi 
*aq) +{ + int ret; + u32 val; + + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + !(val & QSPI_SR2_RBUSY) && + (val & QSPI_SR2_HIDLE), 40, + ATMEL_QSPI_SYNC_TIMEOUT); + if (ret) + return ret; + + atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + !(val & QSPI_SR2_QSPIENS), 40, + ATMEL_QSPI_SYNC_TIMEOUT); + if (ret) + return ret; + + clk_disable_unprepare(aq->gclk); + + atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR); + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + !(val & QSPI_SR2_DLOCK), 40, + ATMEL_QSPI_TIMEOUT); + if (ret) + return ret; + + ret = readl_poll_timeout(aq->regs + QSPI_SR2, val, + !(val & QSPI_SR2_CALBSY), 40, + ATMEL_QSPI_TIMEOUT); + if (ret) + return ret; + + return 0; +} + static void atmel_qspi_remove(struct platform_device *pdev) { struct spi_controller *ctrl = platform_get_drvdata(pdev); @@ -735,9 +1514,17 @@ static void atmel_qspi_remove(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret >= 0) { + if (aq->caps->has_dma) + atmel_qspi_dma_release(aq); + + if (aq->caps->has_gclk) { + ret = atmel_qspi_sama7g5_suspend(aq); + if (ret) + dev_warn(&pdev->dev, "Failed to de-init device on remove: %d\n", ret); + return; + } + atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); - clk_disable(aq->qspick); - clk_disable(aq->pclk); } else { /* * atmel_qspi_runtime_{suspend,resume} just disable and enable @@ -747,9 +1534,6 @@ static void atmel_qspi_remove(struct platform_device *pdev) dev_warn(&pdev->dev, "Failed to resume device on remove\n"); } - clk_unprepare(aq->qspick); - clk_unprepare(aq->pclk); - pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); @@ -765,6 +1549,12 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev) if (ret < 0) return ret; + if (aq->caps->has_gclk) { + ret = atmel_qspi_sama7g5_suspend(aq); + clk_disable_unprepare(aq->pclk); + return ret; + } + atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); pm_runtime_mark_last_busy(dev); @@ -792,6 +1582,9 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) return ret; } + if (aq->caps->has_gclk) + return atmel_qspi_sama7g5_init(aq); + ret = pm_runtime_force_resume(dev); if (ret < 0) return ret; @@ -847,6 +1640,19 @@ static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = { .has_ricr = true, }; +static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = { + .max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ, + .has_gclk = true, + .octal = true, + .has_dma = true, +}; + +static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = { + .max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ, + .has_gclk = true, + .has_dma = true, +}; + static const struct of_device_id atmel_qspi_dt_ids[] = { { .compatible = "atmel,sama5d2-qspi", @@ -856,6 +1662,15 @@ static const struct of_device_id atmel_qspi_dt_ids[] = { .compatible = "microchip,sam9x60-qspi", .data = &atmel_sam9x60_qspi_caps, }, + { + .compatible = "microchip,sama7g5-ospi", + .data = &atmel_sama7g5_ospi_caps, + }, + { + .compatible = "microchip,sama7g5-qspi", + .data = &atmel_sama7g5_qspi_caps, + }, + { /* sentinel */ } }; diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index d30a21b0b05f..fbe795bbcf50 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -298,19 +298,16 @@ static const struct amd_spi_freq amd_spi_freq[] = { { AMD_SPI_MIN_HZ, F_800KHz, 0}, }; -static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz) +static void amd_set_spi_freq(struct amd_spi *amd_spi, u32 
speed_hz) { unsigned int i, spd7_val, alt_spd; - if (speed_hz < AMD_SPI_MIN_HZ) - return -EINVAL; - for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++) if (speed_hz >= amd_spi_freq[i].speed_hz) break; if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz) - return 0; + return; amd_spi->speed_hz = amd_spi_freq[i].speed_hz; @@ -329,8 +326,6 @@ static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz) amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val, AMD_SPI_SPD7_MASK); } - - return 0; } static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi, @@ -479,6 +474,9 @@ static bool amd_spi_supports_op(struct spi_mem *mem, return false; } + if (op->max_freq < mem->spi->controller->min_speed_hz) + return false; + return spi_mem_default_supports_op(mem, op); } @@ -676,9 +674,7 @@ static int amd_spi_exec_mem_op(struct spi_mem *mem, amd_spi = spi_controller_get_devdata(mem->spi->controller); - ret = amd_set_spi_freq(amd_spi, mem->spi->max_speed_hz); - if (ret) - return ret; + amd_set_spi_freq(amd_spi, op->max_freq); if (amd_spi->version == AMD_SPI_V2) amd_set_spi_addr_mode(amd_spi, op); @@ -705,6 +701,10 @@ static const struct spi_controller_mem_ops amd_spi_mem_ops = { .supports_op = amd_spi_supports_op, }; +static const struct spi_controller_mem_caps amd_spi_mem_caps = { + .per_op_freq = true, +}; + static int amd_spi_host_transfer(struct spi_controller *host, struct spi_message *msg) { @@ -782,6 +782,7 @@ static int amd_spi_probe(struct platform_device *pdev) host->setup = amd_spi_host_setup; host->transfer_one_message = amd_spi_host_transfer; host->mem_ops = &amd_spi_mem_ops; + host->mem_caps = &amd_spi_mem_caps; host->max_transfer_size = amd_spi_max_transfer_size; host->max_message_size = amd_spi_max_transfer_size; diff --git a/drivers/spi/spi-amlogic-spifc-a1.c b/drivers/spi/spi-amlogic-spifc-a1.c index fadf6667cd51..18c9aa2cbc29 100644 --- a/drivers/spi/spi-amlogic-spifc-a1.c +++ b/drivers/spi/spi-amlogic-spifc-a1.c @@ -259,7 +259,7 @@ static int amlogic_spifc_a1_exec_op(struct spi_mem *mem, size_t data_size = op->data.nbytes; int ret; - ret = amlogic_spifc_a1_set_freq(spifc, mem->spi->max_speed_hz); + ret = amlogic_spifc_a1_set_freq(spifc, op->max_freq); if (ret) return ret; @@ -320,6 +320,10 @@ static const struct spi_controller_mem_ops amlogic_spifc_a1_mem_ops = { .adjust_op_size = amlogic_spifc_a1_adjust_op_size, }; +static const struct spi_controller_mem_caps amlogic_spifc_a1_mem_caps = { + .per_op_freq = true, +}; + static int amlogic_spifc_a1_probe(struct platform_device *pdev) { struct spi_controller *ctrl; @@ -356,6 +360,7 @@ static int amlogic_spifc_a1_probe(struct platform_device *pdev) ctrl->bits_per_word_mask = SPI_BPW_MASK(8); ctrl->auto_runtime_pm = true; ctrl->mem_ops = &amlogic_spifc_a1_mem_ops; + ctrl->mem_caps = &amlogic_spifc_a1_mem_caps; ctrl->min_speed_hz = SPIFC_A1_MIN_HZ; ctrl->max_speed_hz = SPIFC_A1_MAX_HZ; ctrl->mode_bits = (SPI_RX_DUAL | SPI_TX_DUAL | diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index a031ecb358e0..0cd37a7436d5 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -43,10 +43,13 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX); #define CQSPI_SLOW_SRAM BIT(4) #define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5) #define CQSPI_RD_NO_IRQ BIT(6) -#define CQSPI_DISABLE_STIG_MODE BIT(7) +#define CQSPI_DMA_SET_MASK BIT(7) +#define CQSPI_SUPPORT_DEVICE_RESET BIT(8) +#define CQSPI_DISABLE_STIG_MODE BIT(9) /* Capabilities */ #define CQSPI_SUPPORTS_OCTAL BIT(0) +#define 
CQSPI_SUPPORTS_QUAD BIT(1) #define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0) @@ -111,7 +114,7 @@ struct cqspi_st { struct cqspi_driver_platdata { u32 hwcaps_mask; - u8 quirks; + u16 quirks; int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata, u_char *rxbuf, loff_t from_addr, size_t n_rx); u32 (*get_dma_status)(struct cqspi_st *cqspi); @@ -146,6 +149,8 @@ struct cqspi_driver_platdata { #define CQSPI_REG_CONFIG_IDLE_LSB 31 #define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF #define CQSPI_REG_CONFIG_BAUD_MASK 0xF +#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK BIT(5) +#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK BIT(6) #define CQSPI_REG_RD_INSTR 0x04 #define CQSPI_REG_RD_INSTR_OPCODE_LSB 0 @@ -832,6 +837,25 @@ failrd: return ret; } +static void cqspi_device_reset(struct cqspi_st *cqspi) +{ + u32 reg; + + reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); + reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK; + writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); + /* + * NOTE: Delay timing implementation is derived from + * spi_nor_hw_reset() + */ + writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); + usleep_range(1, 5); + writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); + usleep_range(100, 150); + writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); + usleep_range(1000, 1200); +} + static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable) { void __iomem *reg_base = cqspi->iobase; @@ -1409,7 +1433,7 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op) struct cqspi_flash_pdata *f_pdata; f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)]; - cqspi_configure(f_pdata, mem->spi->max_speed_hz); + cqspi_configure(f_pdata, op->max_freq); if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) { /* @@ -1658,6 +1682,7 @@ static const struct spi_controller_mem_ops cqspi_mem_ops = { static const struct spi_controller_mem_caps cqspi_mem_caps = { .dtr = true, + .per_op_freq = true, }; static int cqspi_setup_flash(struct cqspi_st *cqspi) @@ -1865,6 +1890,8 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->master_ref_clk_hz); if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL) host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL; + if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD) + host->mode_bits |= SPI_TX_QUAD; if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) { cqspi->use_direct_mode = true; cqspi->use_direct_mode_wr = true; @@ -1886,8 +1913,7 @@ static int cqspi_probe(struct platform_device *pdev) if (ddata->quirks & CQSPI_DISABLE_STIG_MODE) cqspi->disable_stig_mode = true; - if (of_device_is_compatible(pdev->dev.of_node, - "xlnx,versal-ospi-1.0")) { + if (ddata->quirks & CQSPI_DMA_SET_MASK) { ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (ret) goto probe_reset_failed; @@ -1917,6 +1943,9 @@ static int cqspi_probe(struct platform_device *pdev) host->num_chipselect = cqspi->num_chipselect; + if (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET) + cqspi_device_reset(cqspi); + if (cqspi->use_direct_mode) { ret = cqspi_request_mmap_dma(cqspi); if (ret == -EPROBE_DEFER) @@ -2037,7 +2066,7 @@ static const struct cqspi_driver_platdata k2g_qspi = { }; static const struct cqspi_driver_platdata am654_ospi = { - .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, + .hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD, .quirks = CQSPI_NEEDS_WR_DELAY, }; @@ -2054,7 +2083,17 @@ static const struct cqspi_driver_platdata socfpga_qspi = { static const struct cqspi_driver_platdata 
versal_ospi = { .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, - .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA, + .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA + | CQSPI_DMA_SET_MASK, + .indirect_read_dma = cqspi_versal_indirect_read_dma, + .get_dma_status = cqspi_get_versal_dma_status, +}; + +static const struct cqspi_driver_platdata versal2_ospi = { + .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, + .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA + | CQSPI_DMA_SET_MASK + | CQSPI_SUPPORT_DEVICE_RESET, .indirect_read_dma = cqspi_versal_indirect_read_dma, .get_dma_status = cqspi_get_versal_dma_status, }; @@ -2111,6 +2150,10 @@ static const struct of_device_id cqspi_dt_ids[] = { .compatible = "mobileye,eyeq5-ospi", .data = &mobileye_eyeq5_ospi, }, + { + .compatible = "amd,versal2-ospi", + .data = &versal2_ospi, + }, { /* end of table */ } }; diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index ea517af9435f..941ecc6f59f8 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -677,7 +677,7 @@ static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) * operation. Transmit-only mode is suitable for the rest of them. */ cfg.dfs = 8; - cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq); + cfg.freq = clamp(op->max_freq, 0U, dws->max_mem_freq); if (op->data.dir == SPI_MEM_DATA_IN) { cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD; cfg.ndf = op->data.nbytes; @@ -894,6 +894,10 @@ static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws) dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF); } +static const struct spi_controller_mem_caps dw_spi_mem_caps = { + .per_op_freq = true, +}; + int dw_spi_add_host(struct device *dev, struct dw_spi *dws) { struct spi_controller *host; @@ -941,8 +945,10 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) host->set_cs = dw_spi_set_cs; host->transfer_one = dw_spi_transfer_one; host->handle_err = dw_spi_handle_err; - if (dws->mem_ops.exec_op) + if (dws->mem_ops.exec_op) { host->mem_ops = &dws->mem_ops; + host->mem_caps = &dw_spi_mem_caps; + } host->max_speed_hz = dws->max_freq; host->flags = SPI_CONTROLLER_GPIO_SS; host->auto_runtime_pm = true; diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 9ec53bf0dda8..355e6a39fb41 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -522,9 +522,10 @@ static void fsl_qspi_invalidate(struct fsl_qspi *q) qspi_writel(q, reg, q->iobase + QUADSPI_MCR); } -static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi) +static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi, + const struct spi_mem_op *op) { - unsigned long rate = spi->max_speed_hz; + unsigned long rate = op->max_freq; int ret; if (q->selected == spi_get_chipselect(spi, 0)) @@ -652,7 +653,7 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK), 10, 1000); - fsl_qspi_select_mem(q, mem->spi); + fsl_qspi_select_mem(q, mem->spi, op); if (needs_amba_base_offset(q)) addr_offset = q->memmap_phy; @@ -839,6 +840,10 @@ static const struct spi_controller_mem_ops fsl_qspi_mem_ops = { .get_name = fsl_qspi_get_name, }; +static const struct spi_controller_mem_caps fsl_qspi_mem_caps = { + .per_op_freq = true, +}; + static int fsl_qspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -923,6 +928,7 @@ static int fsl_qspi_probe(struct platform_device *pdev) 
ctlr->bus_num = -1; ctlr->num_chipselect = 4; ctlr->mem_ops = &fsl_qspi_mem_ops; + ctlr->mem_caps = &fsl_qspi_mem_caps; fsl_qspi_default_setup(q); diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 856a4a9def66..2f2082652a1a 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -618,7 +618,7 @@ static struct spi_controller *fsl_spi_probe(struct device *dev, if (ret < 0) goto err_probe; - dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base, + dev_info(dev, "at MMIO %pa (irq = %d), %s mode\n", &mem->start, mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); return host; diff --git a/drivers/spi/spi-kspi2.c b/drivers/spi/spi-kspi2.c new file mode 100644 index 000000000000..ca73ec52ce63 --- /dev/null +++ b/drivers/spi/spi-kspi2.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) KEBA Industrial Automation Gmbh 2024 + * + * Driver for KEBA SPI host controller type 2 FPGA IP core + */ + +#include +#include +#include + +#define KSPI2 "kspi2" + +#define KSPI2_CLK_FREQ_REG 0x03 +#define KSPI2_CLK_FREQ_MASK 0x0f +#define KSPI2_CLK_FREQ_62_5M 0x0 +#define KSPI2_CLK_FREQ_33_3M 0x1 +#define KSPI2_CLK_FREQ_125M 0x2 +#define KSPI2_CLK_FREQ_50M 0x3 +#define KSPI2_CLK_FREQ_100M 0x4 + +#define KSPI2_CONTROL_REG 0x04 +#define KSPI2_CONTROL_CLK_DIV_MAX 0x0f +#define KSPI2_CONTROL_CLK_DIV_MASK 0x0f +#define KSPI2_CONTROL_CPHA 0x10 +#define KSPI2_CONTROL_CPOL 0x20 +#define KSPI2_CONTROL_CLK_MODE_MASK 0x30 +#define KSPI2_CONTROL_INIT KSPI2_CONTROL_CLK_DIV_MAX + +#define KSPI2_STATUS_REG 0x08 +#define KSPI2_STATUS_IN_USE 0x01 +#define KSPI2_STATUS_BUSY 0x02 + +#define KSPI2_DATA_REG 0x0c + +#define KSPI2_CS_NR_REG 0x10 +#define KSPI2_CS_NR_NONE 0xff + +#define KSPI2_MODE_BITS (SPI_CPHA | SPI_CPOL) +#define KSPI2_NUM_CS 255 + +#define KSPI2_SPEED_HZ_MIN(kspi) (kspi->base_speed_hz / 65536) +#define KSPI2_SPEED_HZ_MAX(kspi) (kspi->base_speed_hz / 2) + +/* timeout is 10 times the time to transfer one byte at slowest clock */ +#define KSPI2_XFER_TIMEOUT_US(kspi) (USEC_PER_SEC / \ + KSPI2_SPEED_HZ_MIN(kspi) * 8 * 10) + +#define KSPI2_INUSE_SLEEP_US (2 * USEC_PER_MSEC) +#define KSPI2_INUSE_TIMEOUT_US (10 * USEC_PER_SEC) + +struct kspi2 { + struct keba_spi_auxdev *auxdev; + void __iomem *base; + struct spi_controller *host; + + u32 base_speed_hz; /* SPI base clock frequency in HZ */ + u8 control_shadow; + + struct spi_device **device; + int device_size; +}; + +static int kspi2_inuse_lock(struct kspi2 *kspi) +{ + u8 sts; + int ret; + + /* + * The SPI controller has an IN_USE bit for locking access to the + * controller. This enables the use of the SPI controller by other none + * Linux processors. + * + * If the SPI controller is free, then the first read returns + * IN_USE == 0. After that the SPI controller is locked and further + * reads of IN_USE return 1. + * + * The SPI controller is unlocked by writing 1 into IN_USE. + * + * The IN_USE bit acts as a hardware semaphore for the SPI controller. + * Poll for semaphore, but sleep while polling to free the CPU. 
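+ *
+ * With the constants above the poll sleeps between reads (at most
+ * KSPI2_INUSE_SLEEP_US, 2 ms) and gives up after 10 s
+ * (KSPI2_INUSE_TIMEOUT_US).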
+ */ + ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG, + sts, (sts & KSPI2_STATUS_IN_USE) == 0, + KSPI2_INUSE_SLEEP_US, KSPI2_INUSE_TIMEOUT_US); + if (ret != 0) + dev_warn(&kspi->auxdev->auxdev.dev, "%s err!\n", __func__); + + return ret; +} + +static void kspi2_inuse_unlock(struct kspi2 *kspi) +{ + /* unlock the controller by writing 1 into IN_USE */ + iowrite8(KSPI2_STATUS_IN_USE, kspi->base + KSPI2_STATUS_REG); +} + +static int kspi2_prepare_hardware(struct spi_controller *host) +{ + struct kspi2 *kspi = spi_controller_get_devdata(host); + + /* lock hardware semaphore before actual use of controller */ + return kspi2_inuse_lock(kspi); +} + +static int kspi2_unprepare_hardware(struct spi_controller *host) +{ + struct kspi2 *kspi = spi_controller_get_devdata(host); + + /* unlock hardware semaphore after actual use of controller */ + kspi2_inuse_unlock(kspi); + + return 0; +} + +static u8 kspi2_calc_minimal_divider(struct kspi2 *kspi, u32 max_speed_hz) +{ + u8 div; + + /* + * Divider values 2, 4, 8, 16, ..., 65536 are possible. They are coded + * as 0, 1, 2, 3, ..., 15 in the CONTROL_CLK_DIV bit. + */ + for (div = 0; div < KSPI2_CONTROL_CLK_DIV_MAX; div++) { + if ((kspi->base_speed_hz >> (div + 1)) <= max_speed_hz) + return div; + } + + /* return divider for slowest clock if loop fails to find one */ + return KSPI2_CONTROL_CLK_DIV_MAX; +} + +static void kspi2_write_control_reg(struct kspi2 *kspi, u8 val, u8 mask) +{ + /* write control register only when necessary to improve performance */ + if (val != (kspi->control_shadow & mask)) { + kspi->control_shadow = (kspi->control_shadow & ~mask) | val; + iowrite8(kspi->control_shadow, kspi->base + KSPI2_CONTROL_REG); + } +} + +static int kspi2_txrx_byte(struct kspi2 *kspi, u8 tx, u8 *rx) +{ + u8 sts; + int ret; + + /* start transfer by writing TX byte */ + iowrite8(tx, kspi->base + KSPI2_DATA_REG); + + /* wait till finished (BUSY == 0) */ + ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG, + sts, (sts & KSPI2_STATUS_BUSY) == 0, + 0, KSPI2_XFER_TIMEOUT_US(kspi)); + if (ret != 0) + return ret; + + /* read RX byte */ + if (rx) + *rx = ioread8(kspi->base + KSPI2_DATA_REG); + + return 0; +} + +static int kspi2_process_transfer(struct kspi2 *kspi, struct spi_transfer *t) +{ + u8 tx = 0; + u8 rx; + int i; + int ret; + + for (i = 0; i < t->len; i++) { + if (t->tx_buf) + tx = ((const u8 *)t->tx_buf)[i]; + + ret = kspi2_txrx_byte(kspi, tx, &rx); + if (ret) + return ret; + + if (t->rx_buf) + ((u8 *)t->rx_buf)[i] = rx; + } + + return 0; +} + +static int kspi2_setup_transfer(struct kspi2 *kspi, + struct spi_device *spi, + struct spi_transfer *t) +{ + u32 max_speed_hz = spi->max_speed_hz; + u8 clk_div; + + /* + * spi_device (spi) has default parameters. Some of these can be + * overwritten by parameters in spi_transfer (t). 
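+ *
+ * Worked example (illustrative, not from the datasheet): with a 62.5 MHz
+ * base clock and a requested t->speed_hz of 10 MHz,
+ * kspi2_calc_minimal_divider() returns 2, i.e. SCK = 62.5 MHz / 2^(2 + 1)
+ * ~= 7.8 MHz, the fastest setting that does not exceed the request.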
+ */ + if (t->bits_per_word && ((t->bits_per_word % 8) != 0)) { + dev_err(&spi->dev, "Word width %d not supported!\n", + t->bits_per_word); + + return -EINVAL; + } + + if (t->speed_hz && (t->speed_hz < max_speed_hz)) + max_speed_hz = t->speed_hz; + + clk_div = kspi2_calc_minimal_divider(kspi, max_speed_hz); + kspi2_write_control_reg(kspi, clk_div, KSPI2_CONTROL_CLK_DIV_MASK); + + return 0; +} + +static int kspi2_transfer_one(struct spi_controller *host, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct kspi2 *kspi = spi_controller_get_devdata(host); + int ret; + + ret = kspi2_setup_transfer(kspi, spi, t); + if (ret != 0) + return ret; + + if (t->len) { + ret = kspi2_process_transfer(kspi, t); + if (ret != 0) + return ret; + } + + return 0; +} + +static void kspi2_set_cs(struct spi_device *spi, bool enable) +{ + struct spi_controller *host = spi->controller; + struct kspi2 *kspi = spi_controller_get_devdata(host); + + /* controller is using active low chip select signals by design */ + if (!enable) + iowrite8(spi_get_chipselect(spi, 0), kspi->base + KSPI2_CS_NR_REG); + else + iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG); +} + +static int kspi2_prepare_message(struct spi_controller *host, + struct spi_message *msg) +{ + struct kspi2 *kspi = spi_controller_get_devdata(host); + struct spi_device *spi = msg->spi; + u8 mode = 0; + + /* setup SPI clock phase and polarity */ + if (spi->mode & SPI_CPHA) + mode |= KSPI2_CONTROL_CPHA; + if (spi->mode & SPI_CPOL) + mode |= KSPI2_CONTROL_CPOL; + kspi2_write_control_reg(kspi, mode, KSPI2_CONTROL_CLK_MODE_MASK); + + return 0; +} + +static int kspi2_setup(struct spi_device *spi) +{ + struct kspi2 *kspi = spi_controller_get_devdata(spi->controller); + + /* + * Check only parameters. Actual setup is done in kspi2_prepare_message + * and directly before the SPI transfer starts. 
+ */ + + if (spi->mode & ~KSPI2_MODE_BITS) { + dev_err(&spi->dev, "Mode %d not supported!\n", spi->mode); + + return -EINVAL; + } + + if ((spi->bits_per_word % 8) != 0) { + dev_err(&spi->dev, "Word width %d not supported!\n", + spi->bits_per_word); + + return -EINVAL; + } + + if ((spi->max_speed_hz == 0) || + (spi->max_speed_hz > KSPI2_SPEED_HZ_MAX(kspi))) + spi->max_speed_hz = KSPI2_SPEED_HZ_MAX(kspi); + + if (spi->max_speed_hz < KSPI2_SPEED_HZ_MIN(kspi)) { + dev_err(&spi->dev, "Requested speed of %d Hz is too low!\n", + spi->max_speed_hz); + + return -EINVAL; + } + + return 0; +} + +static void kspi2_unregister_devices(struct kspi2 *kspi) +{ + int i; + + for (i = 0; i < kspi->device_size; i++) { + struct spi_device *device = kspi->device[i]; + + if (device) + spi_unregister_device(device); + } +} + +static int kspi2_register_devices(struct kspi2 *kspi) +{ + struct spi_board_info *info = kspi->auxdev->info; + int i; + + /* register all known SPI devices */ + for (i = 0; i < kspi->auxdev->info_size; i++) { + struct spi_device *device = spi_new_device(kspi->host, &info[i]); + + if (!device) { + kspi2_unregister_devices(kspi); + + return -ENODEV; + } + kspi->device[i] = device; + } + + return 0; +} + +static void kspi2_init(struct kspi2 *kspi) +{ + iowrite8(KSPI2_CONTROL_INIT, kspi->base + KSPI2_CONTROL_REG); + kspi->control_shadow = KSPI2_CONTROL_INIT; + + iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG); +} + +static int kspi2_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct device *dev = &auxdev->dev; + struct spi_controller *host; + struct kspi2 *kspi; + u8 clk_reg; + int ret; + + host = devm_spi_alloc_host(dev, sizeof(struct kspi2)); + if (!host) + return -ENOMEM; + kspi = spi_controller_get_devdata(host); + kspi->auxdev = container_of(auxdev, struct keba_spi_auxdev, auxdev); + kspi->host = host; + kspi->device = devm_kcalloc(dev, kspi->auxdev->info_size, + sizeof(*kspi->device), GFP_KERNEL); + if (!kspi->device) + return -ENOMEM; + kspi->device_size = kspi->auxdev->info_size; + auxiliary_set_drvdata(auxdev, kspi); + + kspi->base = devm_ioremap_resource(dev, &kspi->auxdev->io); + if (IS_ERR(kspi->base)) + return PTR_ERR(kspi->base); + + /* read the SPI base clock frequency */ + clk_reg = ioread8(kspi->base + KSPI2_CLK_FREQ_REG); + switch (clk_reg & KSPI2_CLK_FREQ_MASK) { + case KSPI2_CLK_FREQ_62_5M: + kspi->base_speed_hz = 62500000; break; + case KSPI2_CLK_FREQ_33_3M: + kspi->base_speed_hz = 33333333; break; + case KSPI2_CLK_FREQ_125M: + kspi->base_speed_hz = 125000000; break; + case KSPI2_CLK_FREQ_50M: + kspi->base_speed_hz = 50000000; break; + case KSPI2_CLK_FREQ_100M: + kspi->base_speed_hz = 100000000; break; + default: + dev_err(dev, "Undefined SPI base clock frequency!\n"); + return -ENODEV; + } + + kspi2_init(kspi); + + host->bus_num = -1; + host->num_chipselect = KSPI2_NUM_CS; + host->mode_bits = KSPI2_MODE_BITS; + host->setup = kspi2_setup; + host->prepare_transfer_hardware = kspi2_prepare_hardware; + host->unprepare_transfer_hardware = kspi2_unprepare_hardware; + host->prepare_message = kspi2_prepare_message; + host->set_cs = kspi2_set_cs; + host->transfer_one = kspi2_transfer_one; + ret = devm_spi_register_controller(dev, host); + if (ret) { + dev_err(dev, "Failed to register host (%d)!\n", ret); + return ret; + } + + ret = kspi2_register_devices(kspi); + if (ret) { + dev_err(dev, "Failed to register devices (%d)!\n", ret); + return ret; + } + + return 0; +} + +static void kspi2_remove(struct auxiliary_device *auxdev) +{ + 
struct kspi2 *kspi = auxiliary_get_drvdata(auxdev); + + kspi2_unregister_devices(kspi); +} + +static const struct auxiliary_device_id kspi2_devtype_aux[] = { + { .name = "keba.spi" }, + { }, +}; +MODULE_DEVICE_TABLE(auxiliary, kspi2_devtype_aux); + +static struct auxiliary_driver kspi2_driver_aux = { + .name = KSPI2, + .id_table = kspi2_devtype_aux, + .probe = kspi2_probe, + .remove = kspi2_remove, +}; +module_auxiliary_driver(kspi2_driver_aux); + +MODULE_AUTHOR("Gerhard Engleder "); +MODULE_DESCRIPTION("KEBA SPI host controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index abc6792e738c..a9f0f47f4759 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -187,6 +187,16 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, return false; } + if (op->max_freq && mem->spi->controller->min_speed_hz && + op->max_freq < mem->spi->controller->min_speed_hz) + return false; + + if (op->max_freq && + op->max_freq < mem->spi->max_speed_hz) { + if (!spi_mem_controller_is_capable(ctlr, per_op_freq)) + return false; + } + return spi_mem_check_buswidth(mem, op); } EXPORT_SYMBOL_GPL(spi_mem_default_supports_op); @@ -364,6 +374,9 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) u8 *tmpbuf; int ret; + /* Make sure the operation frequency is correct before going futher */ + spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op); + ret = spi_mem_check_op(op); if (ret) return ret; @@ -410,6 +423,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) xfers[xferpos].tx_buf = tmpbuf; xfers[xferpos].len = op->cmd.nbytes; xfers[xferpos].tx_nbits = op->cmd.buswidth; + xfers[xferpos].speed_hz = op->max_freq; spi_message_add_tail(&xfers[xferpos], &msg); xferpos++; totalxferlen++; @@ -424,6 +438,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) xfers[xferpos].tx_buf = tmpbuf + 1; xfers[xferpos].len = op->addr.nbytes; xfers[xferpos].tx_nbits = op->addr.buswidth; + xfers[xferpos].speed_hz = op->max_freq; spi_message_add_tail(&xfers[xferpos], &msg); xferpos++; totalxferlen += op->addr.nbytes; @@ -435,6 +450,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) xfers[xferpos].len = op->dummy.nbytes; xfers[xferpos].tx_nbits = op->dummy.buswidth; xfers[xferpos].dummy_data = 1; + xfers[xferpos].speed_hz = op->max_freq; spi_message_add_tail(&xfers[xferpos], &msg); xferpos++; totalxferlen += op->dummy.nbytes; @@ -450,6 +466,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) } xfers[xferpos].len = op->data.nbytes; + xfers[xferpos].speed_hz = op->max_freq; spi_message_add_tail(&xfers[xferpos], &msg); xferpos++; totalxferlen += op->data.nbytes; @@ -528,6 +545,53 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) } EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size); +/** + * spi_mem_adjust_op_freq() - Adjust the frequency of a SPI mem operation to + * match controller, PCB and chip limitations + * @mem: the SPI memory + * @op: the operation to adjust + * + * Some chips have per-op frequency limitations and must adapt the maximum + * speed. This function allows SPI mem drivers to set @op->max_freq to the + * maximum supported value. 
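+ *
+ * For instance, an operation declared with a 104 MHz limit on a device whose
+ * max_speed_hz is 50 MHz is clamped to 50 MHz; an operation that leaves
+ * @op->max_freq at 0 is likewise set to 50 MHz.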
+ */ +void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op) +{ + if (!op->max_freq || op->max_freq > mem->spi->max_speed_hz) + op->max_freq = mem->spi->max_speed_hz; +} +EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq); + +/** + * spi_mem_calc_op_duration() - Derives the theoretical length (in ns) of an + * operation. This helps finding the best variant + * among a list of possible choices. + * @op: the operation to benchmark + * + * Some chips have per-op frequency limitations, PCBs usually have their own + * limitations as well, and controllers can support dual, quad or even octal + * modes, sometimes in DTR. All these combinations make it impossible to + * statically list the best combination for all situations. If we want something + * accurate, all these combinations should be rated (eg. with a time estimate) + * and the best pick should be taken based on these calculations. + * + * Returns a ns estimate for the time this op would take. + */ +u64 spi_mem_calc_op_duration(struct spi_mem_op *op) +{ + u64 ncycles = 0; + u32 ns_per_cycles; + + ns_per_cycles = 1000000000 / op->max_freq; + ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1); + ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1); + ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); + ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1); + + return ncycles * ns_per_cycles; +} +EXPORT_SYMBOL_GPL(spi_mem_calc_op_duration); + static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf) { diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c index ad2b5ffa6153..fa828fcaaef2 100644 --- a/drivers/spi/spi-microchip-core-qspi.c +++ b/drivers/spi/spi-microchip-core-qspi.c @@ -265,7 +265,8 @@ static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id) return ret; } -static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi) +static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi, + const struct spi_mem_op *op) { unsigned long clk_hz; u32 control, baud_rate_val = 0; @@ -274,11 +275,11 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi if (!clk_hz) return -EINVAL; - baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz); + baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq); if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) { dev_err(&spi->dev, "could not configure the clock for spi clock %d Hz & system clock %ld Hz\n", - spi->max_speed_hz, clk_hz); + op->max_freq, clk_hz); return -EINVAL; } @@ -399,7 +400,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o if (err) goto error; - err = mchp_coreqspi_setup_clock(qspi, mem->spi); + err = mchp_coreqspi_setup_clock(qspi, mem->spi, op); if (err) goto error; @@ -457,6 +458,10 @@ error: static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { + struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller); + unsigned long clk_hz; + u32 baud_rate_val; + if (!spi_mem_default_supports_op(mem, op)) return false; @@ -479,6 +484,14 @@ static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_ return false; } + clk_hz = clk_get_rate(qspi->clk); + if (!clk_hz) + return false; + + baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq); + if (baud_rate_val > MAX_DIVIDER || 
baud_rate_val < MIN_DIVIDER) + return false; + return true; } @@ -498,6 +511,10 @@ static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = { .exec_op = mchp_coreqspi_exec_op, }; +static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = { + .per_op_freq = true, +}; + static int mchp_coreqspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -540,6 +557,7 @@ static int mchp_coreqspi_probe(struct platform_device *pdev) ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->mem_ops = &mchp_coreqspi_mem_ops; + ctlr->mem_caps = &mchp_coreqspi_mem_caps; ctlr->setup = mchp_coreqspi_setup_op; ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 85f3bafc975d..197bf2dbe5de 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -961,7 +961,7 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem, mtk_spi_reset(mdata); mtk_spi_hw_init(mem->spi->controller, mem->spi); - mtk_spi_prepare_transfer(mem->spi->controller, mem->spi->max_speed_hz); + mtk_spi_prepare_transfer(mem->spi->controller, op->max_freq); reg_val = readl(mdata->base + SPI_CFG3_IPM_REG); /* opcode byte len */ @@ -1122,6 +1122,10 @@ static const struct spi_controller_mem_ops mtk_spi_mem_ops = { .exec_op = mtk_spi_mem_exec_op, }; +static const struct spi_controller_mem_caps mtk_spi_mem_caps = { + .per_op_freq = true, +}; + static int mtk_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1160,6 +1164,7 @@ static int mtk_spi_probe(struct platform_device *pdev) if (mdata->dev_comp->ipm_design) { mdata->dev = dev; host->mem_ops = &mtk_spi_mem_ops; + host->mem_caps = &mtk_spi_mem_caps; init_completion(&mdata->spimem_done); } diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 809767d3145c..eeaea6a5e310 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -522,7 +522,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem, int i, ret; u8 addr[8], cmd[2]; - ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz); + ret = mxic_spi_set_freq(mxic, op->max_freq); if (ret) return ret; @@ -582,6 +582,7 @@ static const struct spi_controller_mem_caps mxic_spi_mem_caps = { .dtr = true, .ecc = true, .swap16 = true, + .per_op_freq = true, }; static void mxic_spi_set_cs(struct spi_device *spi, bool lvl) diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index e6d955d964f4..43455305fdf4 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -381,6 +381,8 @@ static int mxs_spi_transfer_one(struct spi_controller *host, if (status) break; + t->effective_speed_hz = ssp->clk_rate; + /* De-assert on last transfer, inverted by cs_change flag */ flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ? TXRX_DEASSERT_CS : 0; diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 1161b9e5a4dc..bad6b30bab0e 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -705,9 +705,10 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f) * Value for rest of the CS FLSHxxCR0 register would be zero. 
* */ -static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi) +static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi, + const struct spi_mem_op *op) { - unsigned long rate = spi->max_speed_hz; + unsigned long rate = op->max_freq; int ret; uint64_t size_kb; @@ -931,7 +932,7 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true); WARN_ON(err); - nxp_fspi_select_mem(f, mem->spi); + nxp_fspi_select_mem(f, mem->spi, op); nxp_fspi_prepare_lut(f, op); /* @@ -1149,6 +1150,10 @@ static const struct spi_controller_mem_ops nxp_fspi_mem_ops = { .get_name = nxp_fspi_get_name, }; +static const struct spi_controller_mem_caps nxp_fspi_mem_caps = { + .per_op_freq = true, +}; + static int nxp_fspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -1246,6 +1251,7 @@ static int nxp_fspi_probe(struct platform_device *pdev) ctlr->bus_num = -1; ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT; ctlr->mem_ops = &nxp_fspi_mem_ops; + ctlr->mem_caps = &nxp_fspi_mem_caps; nxp_fspi_default_setup(f); diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c index 70bbb459caa4..f3fe10eddb6a 100644 --- a/drivers/spi/spi-rockchip-sfc.c +++ b/drivers/spi/spi-rockchip-sfc.c @@ -13,12 +13,14 @@ #include #include #include +#include #include #include #include +#include #include +#include #include -#include #include /* System control */ @@ -110,6 +112,7 @@ #define SFC_VER_3 0x3 #define SFC_VER_4 0x4 #define SFC_VER_5 0x5 +#define SFC_VER_8 0x8 /* Delay line controller register */ #define SFC_DLL_CTRL0 0x3C @@ -150,16 +153,13 @@ /* Data */ #define SFC_DATA 0x108 -/* The controller and documentation reports that it supports up to 4 CS - * devices (0-3), however I have only been able to test a single CS (CS 0) - * due to the configuration of my device. - */ -#define SFC_MAX_CHIPSELECT_NUM 4 +#define SFC_CS1_REG_OFFSET 0x200 + +#define SFC_MAX_CHIPSELECT_NUM 2 -/* The SFC can transfer max 16KB - 1 at one time - * we set it to 15.5KB here for alignment. 
- */ #define SFC_MAX_IOSIZE_VER3 (512 * 31) +/* Although up to 4GB, 64KB is enough with less mem reserved */ +#define SFC_MAX_IOSIZE_VER4 (0x10000U) /* DMA is only enabled for large data transmission */ #define SFC_DMA_TRANS_THRETHOLD (0x40) @@ -169,12 +169,14 @@ */ #define SFC_MAX_SPEED (150 * 1000 * 1000) +#define ROCKCHIP_AUTOSUSPEND_DELAY 2000 + struct rockchip_sfc { struct device *dev; void __iomem *regbase; struct clk *hclk; struct clk *clk; - u32 frequency; + u32 speed[SFC_MAX_CHIPSELECT_NUM]; /* virtual mapped addr for dma_buffer */ void *buffer; dma_addr_t dma_buffer; @@ -216,6 +218,22 @@ static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc) return SFC_MAX_IOSIZE_VER3; } +static int rockchip_sfc_clk_set_rate(struct rockchip_sfc *sfc, unsigned long speed) +{ + if (sfc->version >= SFC_VER_8) + return clk_set_rate(sfc->clk, speed * 2); + else + return clk_set_rate(sfc->clk, speed); +} + +static unsigned long rockchip_sfc_clk_get_rate(struct rockchip_sfc *sfc) +{ + if (sfc->version >= SFC_VER_8) + return clk_get_rate(sfc->clk) / 2; + else + return clk_get_rate(sfc->clk); +} + static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask) { u32 reg; @@ -302,6 +320,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc, u32 len) { u32 ctrl = 0, cmd = 0; + u8 cs = spi_get_chipselect(mem->spi, 0); /* set CMD */ cmd = op->cmd.opcode; @@ -315,7 +334,8 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc, cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT; } else { cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT; - writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT); + writel(op->addr.nbytes * 8 - 1, + sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_ABIT); } ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT); @@ -347,7 +367,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc, /* set the Controller */ ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE; - cmd |= spi_get_chipselect(mem->spi, 0) << SFC_CMD_CS_SHIFT; + cmd |= cs << SFC_CMD_CS_SHIFT; dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n", op->addr.nbytes, op->addr.buswidth, @@ -355,7 +375,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc, dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n", ctrl, cmd, op->addr.val, len); - writel(ctrl, sfc->regbase + SFC_CTRL); + writel(ctrl, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_CTRL); writel(cmd, sfc->regbase + SFC_CMD); if (op->addr.nbytes) writel(op->addr.val, sfc->regbase + SFC_ADDR); @@ -453,8 +473,10 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc, dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len); - if (op->data.dir == SPI_MEM_DATA_OUT) + if (op->data.dir == SPI_MEM_DATA_OUT) { memcpy(sfc->buffer, op->data.buf.out, len); + dma_sync_single_for_device(sfc->dev, sfc->dma_buffer, len, DMA_TO_DEVICE); + } ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len); if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) { @@ -462,8 +484,11 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc, ret = -ETIMEDOUT; } rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA); - if (op->data.dir == SPI_MEM_DATA_IN) + + if (op->data.dir == SPI_MEM_DATA_IN) { + dma_sync_single_for_cpu(sfc->dev, sfc->dma_buffer, len, DMA_FROM_DEVICE); memcpy(op->data.buf.in, sfc->buffer, len); + } return ret; } @@ -473,6 +498,16 @@ static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us) int ret = 0; u32 status; + /* + * There is very little data left in fifo, and the controller will + * 
complete the transmission in a short period of time. + */ + ret = readl_poll_timeout(sfc->regbase + SFC_SR, status, + !(status & SFC_SR_IS_BUSY), + 0, 10); + if (!ret) + return 0; + ret = readl_poll_timeout(sfc->regbase + SFC_SR, status, !(status & SFC_SR_IS_BUSY), 20, timeout_us); @@ -491,14 +526,22 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller); u32 len = op->data.nbytes; int ret; + u8 cs = spi_get_chipselect(mem->spi, 0); - if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) { - ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz); + ret = pm_runtime_get_sync(sfc->dev); + if (ret < 0) { + pm_runtime_put_noidle(sfc->dev); + return ret; + } + + if (unlikely(op->max_freq != sfc->speed[cs]) && + !has_acpi_companion(sfc->dev)) { + ret = rockchip_sfc_clk_set_rate(sfc, op->max_freq); if (ret) - return ret; - sfc->frequency = mem->spi->max_speed_hz; + goto out; + sfc->speed[cs] = op->max_freq; dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n", - sfc->frequency, clk_get_rate(sfc->clk)); + sfc->speed[cs], rockchip_sfc_clk_get_rate(sfc)); } rockchip_sfc_adjust_op_work((struct spi_mem_op *)op); @@ -515,11 +558,17 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op if (ret != len) { dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir); - return -EIO; + ret = -EIO; + goto out; } } - return rockchip_sfc_xfer_done(sfc, 100000); + ret = rockchip_sfc_xfer_done(sfc, 100000); +out: + pm_runtime_mark_last_busy(sfc->dev); + pm_runtime_put_autosuspend(sfc->dev); + + return ret; } static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) @@ -536,6 +585,10 @@ static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = { .adjust_op_size = rockchip_sfc_adjust_op_size, }; +static const struct spi_controller_mem_caps rockchip_sfc_mem_caps = { + .per_op_freq = true, +}; + static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id) { struct rockchip_sfc *sfc = dev_id; @@ -561,6 +614,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev) struct spi_controller *host; struct rockchip_sfc *sfc; int ret; + u32 i, val; host = devm_spi_alloc_host(&pdev->dev, sizeof(*sfc)); if (!host) @@ -568,6 +622,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev) host->flags = SPI_CONTROLLER_HALF_DUPLEX; host->mem_ops = &rockchip_sfc_mem_ops; + host->mem_caps = &rockchip_sfc_mem_caps; host->dev.of_node = pdev->dev.of_node; host->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL; host->max_speed_hz = SFC_MAX_SPEED; @@ -581,31 +636,29 @@ static int rockchip_sfc_probe(struct platform_device *pdev) if (IS_ERR(sfc->regbase)) return PTR_ERR(sfc->regbase); - sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc"); + if (!has_acpi_companion(&pdev->dev)) + sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc"); if (IS_ERR(sfc->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(sfc->clk), "Failed to get sfc interface clk\n"); - sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc"); + if (!has_acpi_companion(&pdev->dev)) + sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc"); if (IS_ERR(sfc->hclk)) return dev_err_probe(&pdev->dev, PTR_ERR(sfc->hclk), "Failed to get sfc ahb clk\n"); - sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma"); - - if (sfc->use_dma) { - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); - if (ret) { - dev_warn(dev, "Unable to set dma mask\n"); - return ret; - } - - sfc->buffer 
= dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3, - &sfc->dma_buffer, GFP_KERNEL); - if (!sfc->buffer) - return -ENOMEM; + if (has_acpi_companion(&pdev->dev)) { + ret = device_property_read_u32(&pdev->dev, "clock-frequency", &val); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to find clock-frequency in ACPI\n"); + for (i = 0; i < SFC_MAX_CHIPSELECT_NUM; i++) + sfc->speed[i] = val; } + sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma"); + ret = clk_prepare_enable(sfc->hclk); if (ret) { dev_err(&pdev->dev, "Failed to enable ahb clk\n"); @@ -630,19 +683,47 @@ static int rockchip_sfc_probe(struct platform_device *pdev) goto err_irq; } + platform_set_drvdata(pdev, sfc); + ret = rockchip_sfc_init(sfc); if (ret) goto err_irq; - sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc); sfc->version = rockchip_sfc_get_version(sfc); + sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc); - ret = spi_register_controller(host); + pm_runtime_set_autosuspend_delay(dev, ROCKCHIP_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_get_noresume(dev); + + if (sfc->use_dma) { + sfc->buffer = (u8 *)__get_free_pages(GFP_KERNEL | GFP_DMA32, + get_order(sfc->max_iosize)); + if (!sfc->buffer) { + ret = -ENOMEM; + goto err_dma; + } + sfc->dma_buffer = virt_to_phys(sfc->buffer); + } + + ret = devm_spi_register_controller(dev, host); if (ret) - goto err_irq; + goto err_register; + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); return 0; - +err_register: + free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize)); +err_dma: + pm_runtime_get_sync(dev); + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + pm_runtime_dont_use_autosuspend(dev); err_irq: clk_disable_unprepare(sfc->clk); err_clk: @@ -657,11 +738,80 @@ static void rockchip_sfc_remove(struct platform_device *pdev) struct spi_controller *host = sfc->host; spi_unregister_controller(host); + free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize)); clk_disable_unprepare(sfc->clk); clk_disable_unprepare(sfc->hclk); } +#ifdef CONFIG_PM +static int rockchip_sfc_runtime_suspend(struct device *dev) +{ + struct rockchip_sfc *sfc = dev_get_drvdata(dev); + + clk_disable_unprepare(sfc->clk); + clk_disable_unprepare(sfc->hclk); + + return 0; +} + +static int rockchip_sfc_runtime_resume(struct device *dev) +{ + struct rockchip_sfc *sfc = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(sfc->hclk); + if (ret < 0) + return ret; + + ret = clk_prepare_enable(sfc->clk); + if (ret < 0) + clk_disable_unprepare(sfc->hclk); + + return ret; +} +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_SLEEP +static int rockchip_sfc_suspend(struct device *dev) +{ + pinctrl_pm_select_sleep_state(dev); + + return pm_runtime_force_suspend(dev); +} + +static int rockchip_sfc_resume(struct device *dev) +{ + struct rockchip_sfc *sfc = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_force_resume(dev); + if (ret < 0) + return ret; + + pinctrl_pm_select_default_state(dev); + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + rockchip_sfc_init(sfc); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops rockchip_sfc_pm_ops = { + SET_RUNTIME_PM_OPS(rockchip_sfc_runtime_suspend, + rockchip_sfc_runtime_resume, NULL) + 
SET_SYSTEM_SLEEP_PM_OPS(rockchip_sfc_suspend, rockchip_sfc_resume) +}; + static const struct of_device_id rockchip_sfc_dt_ids[] = { { .compatible = "rockchip,sfc"}, { /* sentinel */ } @@ -672,6 +822,7 @@ static struct platform_driver rockchip_sfc_driver = { .driver = { .name = "rockchip-sfc", .of_match_table = rockchip_sfc_dt_ids, + .pm = &rockchip_sfc_pm_ops, }, .probe = rockchip_sfc_probe, .remove = rockchip_sfc_remove, diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c index eecf9ea95ae3..1627aa66c965 100644 --- a/drivers/spi/spi-sc18is602.c +++ b/drivers/spi/spi-sc18is602.c @@ -7,13 +7,15 @@ #include #include +#include #include #include #include #include #include -#include #include +#include + #include enum chips { sc18is602, sc18is602b, sc18is603 }; @@ -236,9 +238,7 @@ static int sc18is602_setup(struct spi_device *spi) static int sc18is602_probe(struct i2c_client *client) { - const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; - struct device_node *np = dev->of_node; struct sc18is602_platform_data *pdata = dev_get_platdata(dev); struct sc18is602 *hw; struct spi_controller *host; @@ -251,8 +251,9 @@ static int sc18is602_probe(struct i2c_client *client) if (!host) return -ENOMEM; + device_set_node(&host->dev, dev_fwnode(dev)); + hw = spi_controller_get_devdata(host); - i2c_set_clientdata(client, hw); /* assert reset and then release */ hw->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); @@ -265,11 +266,7 @@ static int sc18is602_probe(struct i2c_client *client) hw->dev = dev; hw->ctrl = 0xff; - if (client->dev.of_node) - hw->id = (uintptr_t)of_device_get_match_data(&client->dev); - else - hw->id = id->driver_data; - + hw->id = (uintptr_t)i2c_get_match_data(client); switch (hw->id) { case sc18is602: case sc18is602b: @@ -278,28 +275,21 @@ static int sc18is602_probe(struct i2c_client *client) break; case sc18is603: host->num_chipselect = 2; - if (pdata) { + if (pdata) hw->freq = pdata->clock_frequency; - } else { - const __be32 *val; - int len; - - val = of_get_property(np, "clock-frequency", &len); - if (val && len >= sizeof(__be32)) - hw->freq = be32_to_cpup(val); - } + else + device_property_read_u32(dev, "clock-frequency", &hw->freq); if (!hw->freq) hw->freq = SC18IS602_CLOCK; break; } - host->bus_num = np ? -1 : client->adapter->nr; + host->bus_num = dev_fwnode(dev) ? 
-1 : client->adapter->nr; host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST; host->bits_per_word_mask = SPI_BPW_MASK(8); host->setup = sc18is602_setup; host->transfer_one_message = sc18is602_transfer_one; host->max_transfer_size = sc18is602_max_transfer_size; host->max_message_size = sc18is602_max_transfer_size; - host->dev.of_node = np; host->min_speed_hz = hw->freq / 128; host->max_speed_hz = hw->freq / 4; @@ -314,7 +304,7 @@ static const struct i2c_device_id sc18is602_id[] = { }; MODULE_DEVICE_TABLE(i2c, sc18is602_id); -static const struct of_device_id sc18is602_of_match[] __maybe_unused = { +static const struct of_device_id sc18is602_of_match[] = { { .compatible = "nxp,sc18is602", .data = (void *)sc18is602 @@ -334,7 +324,7 @@ MODULE_DEVICE_TABLE(of, sc18is602_of_match); static struct i2c_driver sc18is602_driver = { .driver = { .name = "sc18is602", - .of_match_table = of_match_ptr(sc18is602_of_match), + .of_match_table = sc18is602_of_match, }, .probe = sc18is602_probe, .id_table = sc18is602_id, diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c index adac645732fe..6ad4b729897e 100644 --- a/drivers/spi/spi-sn-f-ospi.c +++ b/drivers/spi/spi-sn-f-ospi.c @@ -335,7 +335,6 @@ static void f_ospi_config_indir_protocol(struct f_ospi *ospi, static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem, const struct spi_mem_op *op) { - struct spi_device *spi = mem->spi; u32 irq_stat_en; int ret; @@ -343,7 +342,7 @@ static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem, if (ret) return ret; - f_ospi_config_clk(ospi, spi->max_speed_hz); + f_ospi_config_clk(ospi, op->max_freq); f_ospi_config_indir_protocol(ospi, mem, op); @@ -577,6 +576,10 @@ static const struct spi_controller_mem_ops f_ospi_mem_ops = { .exec_op = f_ospi_exec_op, }; +static const struct spi_controller_mem_caps f_ospi_mem_caps = { + .per_op_freq = true, +}; + static int f_ospi_init(struct f_ospi *ospi) { int ret; @@ -614,6 +617,7 @@ static int f_ospi_probe(struct platform_device *pdev) | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL | SPI_MODE_0 | SPI_MODE_1 | SPI_LSB_FIRST; ctlr->mem_ops = &f_ospi_mem_ops; + ctlr->mem_caps = &f_ospi_mem_caps; ctlr->bus_num = -1; of_property_read_u32(dev->of_node, "num-cs", &num_cs); if (num_cs > OSPI_NUM_CS) { diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 9122350402b5..49516fee74b0 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -623,7 +623,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, mutex_lock(&qspi->list_lock); if (!qspi->mmap_enabled || qspi->current_cs != spi_get_chipselect(mem->spi, 0)) { - ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz); + ti_qspi_setup_clk(qspi, op->max_freq); ti_qspi_enable_memory_map(mem->spi); } ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth, @@ -658,6 +658,10 @@ static const struct spi_controller_mem_ops ti_qspi_mem_ops = { .adjust_op_size = ti_qspi_adjust_op_size, }; +static const struct spi_controller_mem_caps ti_qspi_mem_caps = { + .per_op_freq = true, +}; + static int ti_qspi_start_transfer_one(struct spi_controller *host, struct spi_message *m) { @@ -777,6 +781,7 @@ static int ti_qspi_probe(struct platform_device *pdev) host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); host->mem_ops = &ti_qspi_mem_ops; + host->mem_caps = &ti_qspi_mem_caps; if (!of_property_read_u32(np, "num-cs", &num_cs)) host->num_chipselect = num_cs; diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index 
dee9c339a35e..2bd25c75f881 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -318,6 +318,7 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert) * zynq_qspi_config_op - Configure QSPI controller for specified transfer * @xqspi: Pointer to the zynq_qspi structure * @spi: Pointer to the spi_device structure + * @op: The memory operation to execute * * Sets the operational mode of QSPI controller for the next QSPI transfer and * sets the requested clock frequency. @@ -331,7 +332,8 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert) * controller the driver will set the highest or lowest frequency supported by * controller. */ -static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi) +static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi, + const struct spi_mem_op *op) { u32 config_reg, baud_rate_val = 0; @@ -346,7 +348,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi) */ while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) && (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) > - spi->max_speed_hz) + op->max_freq) baud_rate_val++; config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET); @@ -379,12 +381,21 @@ static int zynq_qspi_setup_op(struct spi_device *spi) { struct spi_controller *ctlr = spi->controller; struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr); + int ret; if (ctlr->busy) return -EBUSY; - clk_enable(qspi->refclk); - clk_enable(qspi->pclk); + ret = clk_enable(qspi->refclk); + if (ret) + return ret; + + ret = clk_enable(qspi->pclk); + if (ret) { + clk_disable(qspi->refclk); + return ret; + } + zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET, ZYNQ_QSPI_ENABLE_ENABLE_MASK); @@ -534,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, op->dummy.buswidth, op->data.buswidth); zynq_qspi_chipselect(mem->spi, true); - zynq_qspi_config_op(xqspi, mem->spi); + zynq_qspi_config_op(xqspi, mem->spi, op); if (op->cmd.opcode) { reinit_completion(&xqspi->data_completion); @@ -620,6 +631,10 @@ static const struct spi_controller_mem_ops zynq_qspi_mem_ops = { .exec_op = zynq_qspi_exec_mem_op, }; +static const struct spi_controller_mem_caps zynq_qspi_mem_caps = { + .per_op_freq = true, +}; + /** * zynq_qspi_probe - Probe method for the QSPI driver * @pdev: Pointer to the platform_device structure @@ -706,6 +721,7 @@ static int zynq_qspi_probe(struct platform_device *pdev) ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; ctlr->mem_ops = &zynq_qspi_mem_ops; + ctlr->mem_caps = &zynq_qspi_mem_caps; ctlr->setup = zynq_qspi_setup_op; ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2; ctlr->dev.of_node = np; diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 549a6e0c9654..d800d79f62a7 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -535,7 +535,7 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi, * zynqmp_qspi_config_op - Configure QSPI controller for specified * transfer * @xqspi: Pointer to the zynqmp_qspi structure - * @qspi: Pointer to the spi_device structure + * @op: The memory operation to execute * * Sets the operational mode of QSPI controller for the next QSPI transfer and * sets the requested clock frequency. @@ -553,12 +553,12 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi, * frequency supported by controller. 
*/ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi, - struct spi_device *qspi) + const struct spi_mem_op *op) { ulong clk_rate; u32 config_reg, req_speed_hz, baud_rate_val = 0; - req_speed_hz = qspi->max_speed_hz; + req_speed_hz = op->max_freq; if (xqspi->speed_hz != req_speed_hz) { xqspi->speed_hz = req_speed_hz; @@ -1072,7 +1072,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem, op->dummy.buswidth, op->data.buswidth); mutex_lock(&xqspi->op_lock); - zynqmp_qspi_config_op(xqspi, mem->spi); + zynqmp_qspi_config_op(xqspi, op); zynqmp_qspi_chipselect(mem->spi, false); genfifoentry |= xqspi->genfifocs; genfifoentry |= xqspi->genfifobus; @@ -1224,6 +1224,10 @@ static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = { .exec_op = zynqmp_qspi_exec_op, }; +static const struct spi_controller_mem_caps zynqmp_qspi_mem_caps = { + .per_op_freq = true, +}; + /** * zynqmp_qspi_probe - Probe method for the QSPI driver * @pdev: Pointer to the platform_device structure @@ -1333,6 +1337,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->mem_ops = &zynqmp_qspi_mem_ops; + ctlr->mem_caps = &zynqmp_qspi_mem_caps; ctlr->setup = zynqmp_qspi_setup_op; ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->dev.of_node = np; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ff1add2ecb91..e0f79773be70 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -410,29 +410,21 @@ static int spi_probe(struct device *dev) { const struct spi_driver *sdrv = to_spi_driver(dev->driver); struct spi_device *spi = to_spi_device(dev); + struct fwnode_handle *fwnode = dev_fwnode(dev); int ret; ret = of_clk_set_defaults(dev->of_node, false); if (ret) return ret; - if (dev->of_node) { + if (is_of_node(fwnode)) spi->irq = of_irq_get(dev->of_node, 0); - if (spi->irq == -EPROBE_DEFER) - return dev_err_probe(dev, -EPROBE_DEFER, "Failed to get irq\n"); - if (spi->irq < 0) - spi->irq = 0; - } - - if (has_acpi_companion(dev) && spi->irq < 0) { - struct acpi_device *adev = to_acpi_device_node(dev->fwnode); - - spi->irq = acpi_dev_gpio_irq_get(adev, 0); - if (spi->irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - if (spi->irq < 0) - spi->irq = 0; - } + else if (is_acpi_device_node(fwnode) && spi->irq < 0) + spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0); + if (spi->irq == -EPROBE_DEFER) + return dev_err_probe(dev, spi->irq, "Failed to get irq\n"); + if (spi->irq < 0) + spi->irq = 0; ret = dev_pm_domain_attach(dev, true); if (ret) @@ -874,15 +866,18 @@ EXPORT_SYMBOL_GPL(spi_new_device); */ void spi_unregister_device(struct spi_device *spi) { + struct fwnode_handle *fwnode; + if (!spi) return; - if (spi->dev.of_node) { - of_node_clear_flag(spi->dev.of_node, OF_POPULATED); - of_node_put(spi->dev.of_node); + fwnode = dev_fwnode(&spi->dev); + if (is_of_node(fwnode)) { + of_node_clear_flag(to_of_node(fwnode), OF_POPULATED); + of_node_put(to_of_node(fwnode)); + } else if (is_acpi_device_node(fwnode)) { + acpi_device_clear_enumerated(to_acpi_device_node(fwnode)); } - if (ACPI_COMPANION(&spi->dev)) - acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); device_remove_software_node(&spi->dev); device_del(&spi->dev); spi_cleanup(spi); @@ -1059,7 +1054,7 @@ static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool * ambiguity. That's why we use enable, that takes SPI_CS_HIGH * into account. 
*/ - if (has_acpi_companion(&spi->dev)) + if (is_acpi_device_node(dev_fwnode(&spi->dev))) gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable); else /* Polarity handled by GPIO library */ @@ -4841,7 +4836,7 @@ extern struct notifier_block spi_of_notifier; #if IS_ENABLED(CONFIG_ACPI) static int spi_acpi_controller_match(struct device *dev, const void *data) { - return ACPI_COMPANION(dev->parent) == data; + return device_match_acpi_dev(dev->parent, data); } struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 653f82984216..58ae4304fdab 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -698,19 +698,24 @@ static const struct class spidev_class = { .name = "spidev", }; +/* + * The spi device ids are expected to match the device names of the + * spidev_dt_ids array below. Both arrays are kept in the same ordering. + */ static const struct spi_device_id spidev_spi_ids[] = { - { .name = "bh2228fv" }, - { .name = "dh2228fv" }, - { .name = "jg10309-01" }, - { .name = "ltc2488" }, - { .name = "sx1301" }, - { .name = "bk4" }, - { .name = "dhcom-board" }, - { .name = "m53cpld" }, - { .name = "spi-petra" }, - { .name = "spi-authenta" }, - { .name = "em3581" }, - { .name = "si3210" }, + { .name = /* cisco */ "spi-petra" }, + { .name = /* dh */ "dhcom-board" }, + { .name = /* elgin */ "jg10309-01" }, + { .name = /* lineartechnology */ "ltc2488" }, + { .name = /* lwn */ "bk4" }, + { .name = /* lwn */ "bk4-spi" }, + { .name = /* menlo */ "m53cpld" }, + { .name = /* micron */ "spi-authenta" }, + { .name = /* rohm */ "bh2228fv" }, + { .name = /* rohm */ "dh2228fv" }, + { .name = /* semtech */ "sx1301" }, + { .name = /* silabs */ "em3581" }, + { .name = /* silabs */ "si3210" }, {}, }; MODULE_DEVICE_TABLE(spi, spidev_spi_ids); @@ -734,6 +739,7 @@ static const struct of_device_id spidev_dt_ids[] = { { .compatible = "elgin,jg10309-01", .data = &spidev_of_check }, { .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check }, { .compatible = "lwn,bk4", .data = &spidev_of_check }, + { .compatible = "lwn,bk4-spi", .data = &spidev_of_check }, { .compatible = "menlo,m53cpld", .data = &spidev_of_check }, { .compatible = "micron,spi-authenta", .data = &spidev_of_check }, { .compatible = "rohm,bh2228fv", .data = &spidev_of_check }, diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index c46d2b8029be..c4830dfaff3d 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -15,16 +15,32 @@ #define SPI_MEM_OP_CMD(__opcode, __buswidth) \ { \ + .nbytes = 1, \ .buswidth = __buswidth, \ .opcode = __opcode, \ + } + +#define SPI_MEM_DTR_OP_CMD(__opcode, __buswidth) \ + { \ .nbytes = 1, \ + .opcode = __opcode, \ + .buswidth = __buswidth, \ + .dtr = true, \ } #define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \ + { \ + .nbytes = __nbytes, \ + .buswidth = __buswidth, \ + .val = __val, \ + } + +#define SPI_MEM_DTR_OP_ADDR(__nbytes, __val, __buswidth) \ { \ .nbytes = __nbytes, \ .val = __val, \ .buswidth = __buswidth, \ + .dtr = true, \ } #define SPI_MEM_OP_NO_ADDR { } @@ -35,22 +51,47 @@ .buswidth = __buswidth, \ } +#define SPI_MEM_DTR_OP_DUMMY(__nbytes, __buswidth) \ + { \ + .nbytes = __nbytes, \ + .buswidth = __buswidth, \ + .dtr = true, \ + } + #define SPI_MEM_OP_NO_DUMMY { } #define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \ + { \ + .buswidth = __buswidth, \ + .dir = SPI_MEM_DATA_IN, \ + .nbytes = __nbytes, \ + .buf.in = __buf, \ + } + +#define 
SPI_MEM_DTR_OP_DATA_IN(__nbytes, __buf, __buswidth) \ { \ .dir = SPI_MEM_DATA_IN, \ .nbytes = __nbytes, \ .buf.in = __buf, \ .buswidth = __buswidth, \ + .dtr = true, \ } #define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth) \ + { \ + .buswidth = __buswidth, \ + .dir = SPI_MEM_DATA_OUT, \ + .nbytes = __nbytes, \ + .buf.out = __buf, \ + } + +#define SPI_MEM_DTR_OP_DATA_OUT(__nbytes, __buf, __buswidth) \ { \ .dir = SPI_MEM_DATA_OUT, \ .nbytes = __nbytes, \ .buf.out = __buf, \ .buswidth = __buswidth, \ + .dtr = true, \ } #define SPI_MEM_OP_NO_DATA { } @@ -68,6 +109,9 @@ enum spi_mem_data_dir { SPI_MEM_DATA_OUT, }; +#define SPI_MEM_OP_MAX_FREQ(__freq) \ + .max_freq = __freq + /** * struct spi_mem_op - describes a SPI memory operation * @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is @@ -97,6 +141,9 @@ enum spi_mem_data_dir { * operation does not involve transferring data * @data.buf.in: input buffer (must be DMA-able) * @data.buf.out: output buffer (must be DMA-able) + * @max_freq: frequency limitation wrt this operation. 0 means there is no + * specific constraint and the highest achievable frequency can be + * attempted. */ struct spi_mem_op { struct { @@ -135,14 +182,17 @@ struct spi_mem_op { const void *out; } buf; } data; + + unsigned int max_freq; }; -#define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \ +#define SPI_MEM_OP(__cmd, __addr, __dummy, __data, ...) \ { \ .cmd = __cmd, \ .addr = __addr, \ .dummy = __dummy, \ .data = __data, \ + __VA_ARGS__ \ } /** @@ -302,11 +352,13 @@ struct spi_controller_mem_ops { * @ecc: Supports operations with error correction * @swap16: Supports swapping bytes on a 16 bit boundary when configured in * Octal DTR + * @per_op_freq: Supports per operation frequency switching */ struct spi_controller_mem_caps { bool dtr; bool ecc; bool swap16; + bool per_op_freq; }; #define spi_mem_controller_is_capable(ctlr, cap) \ @@ -371,6 +423,8 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, #endif /* CONFIG_SPI_MEM */ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); +void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op); +u64 spi_mem_calc_op_duration(struct spi_mem_op *op); bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op);
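As a worked illustration of the per-operation frequency API introduced above, the following sketch (not part of the patch; the opcodes, bus widths and the 50 MHz cap are made-up values, and buf is assumed to be DMA-able as spi-mem requires) shows how a spi-mem user could clamp two read variants with spi_mem_adjust_op_freq() and keep the one that spi_mem_calc_op_duration() rates as faster:

static int pick_faster_read(struct spi_mem *mem, void *buf, size_t len)
{
	/* Quad-output fast read, no explicit frequency limit. */
	struct spi_mem_op quad = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
					    SPI_MEM_OP_ADDR(3, 0, 1),
					    SPI_MEM_OP_DUMMY(1, 1),
					    SPI_MEM_OP_DATA_IN(len, buf, 4));
	/* Plain single-line read, limited to 50 MHz by the (made-up) chip. */
	struct spi_mem_op single = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
					      SPI_MEM_OP_ADDR(3, 0, 1),
					      SPI_MEM_OP_NO_DUMMY,
					      SPI_MEM_OP_DATA_IN(len, buf, 1),
					      SPI_MEM_OP_MAX_FREQ(50000000));
	struct spi_mem_op *op = &single;

	/* Fill in/clamp max_freq so the duration estimates are meaningful. */
	spi_mem_adjust_op_freq(mem, &quad);
	spi_mem_adjust_op_freq(mem, &single);

	if (spi_mem_supports_op(mem, &quad) &&
	    spi_mem_calc_op_duration(&quad) < spi_mem_calc_op_duration(&single))
		op = &quad;

	return spi_mem_exec_op(mem, op);
}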