
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - SPI AVMM support
//
// Copyright (C) 2018-2020 Intel Corporation. All rights reserved.

#include <linux/spi/spi.h>
/*
 * This driver implements the regmap operations for a generic SPI
 * master to access the registers of the spi slave chip which has an
 * Avalon bus in it.
 *
 * The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated
 * in the spi slave chip. The IP acts as a bridge to convert encoded streams of
 * bytes from the host into register read/writes on its internal Avalon bus. In
 * order to issue register access requests to the slave chip, the host should
 * send formatted byte streams that conform to the transfer protocol.
 *
 * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general
 * introduction to the bridge protocol.
 *
 * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores"
 * describes the packet layer.
 *
 * Chapter "Avalon-ST Serial Peripheral Interface Core" describes the
 * physical layer.
 *
 * The driver sends the formatted byte stream over the SPI bus to the slave
 * chip.
 *
 * The spi-avmm IP on the slave chip decodes the byte stream and initiates
 * the requested register access; the response is then encoded and sent back
 * to the host over the SPI bus.
 */
/* slave's register addr is 32 bits */
/* slave's register value is 32 bits */
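/*
 * SPI_AVMM_REG_SIZE and SPI_AVMM_VAL_SIZE are referenced further down but
 * their definitions are not part of this listing. Given the two comments
 * above, they are presumably the byte sizes of a register address and a
 * register value; a sketch (the exact in-tree definitions may differ):
 */
#define SPI_AVMM_REG_SIZE	4
#define SPI_AVMM_VAL_SIZE	4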
/*
 * For the tx phy buffer, we should make sure the length is aligned to the
 * SPI BPW.
 */
/*
 * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from the slave:
 * the max length of the rx byte stream is unpredictable, so the driver
 * reads and parses the rx data one word at a time.
 */
/**
 * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge
 *
 * @spi: spi slave associated with this bridge.
 * @word_len: bytes of word for spi transfer.
 *
 * The device's registers live in the AVMM bus address space, which requires
 * the driver to issue formatted requests to the spi slave to AVMM bus
 * master bridge to perform register access.
 */
struct spi_avmm_bridge {
	struct spi_device *spi;
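	/*
	 * The remaining members are not shown in this listing. The fields
	 * below are a sketch inferred from the references to br->word_len,
	 * br->trans_len, br->phy_len, br->trans_buf, br->phy_buf and
	 * br->swap_words elsewhere in the file; types and buffer sizes are
	 * assumptions.
	 */
	unsigned char word_len;
	unsigned int trans_len;
	unsigned int phy_len;
	char trans_buf[1024];	/* transaction layer data, size assumed */
	char phy_buf[1024];	/* physical layer data, size assumed */
	void (*swap_words)(void *buf, unsigned int len);
};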

/* from br_swap_words_32() - only this loop line of the helper appears in the listing: */
	while (count--) {
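/*
 * A sketch of what the complete helper around that loop plausibly looks
 * like (a reconstruction, not the verbatim in-tree code): byte-swap each
 * 32-bit word in place, using swab32p() from <linux/swab.h>, so the
 * byte-oriented protocol stream comes out in the right order when the
 * controller transfers 32-bit words.
 */
static void br_swap_words_32(void *buf, unsigned int len)
{
	u32 *p = buf;
	unsigned int count = len / sizeof(u32);

	while (count--) {
		*p = swab32p(p);
		p++;
	}
}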
/*
 * Format transaction layer data in br->trans_buf according to the register
 * access request, and store the valid transaction layer data length in
 * br->trans_len.
 */
/* from br_trans_tx_prepare(): */
	header = (struct trans_req_header *)br->trans_buf;
	header->code = code;
	header->rsvd = 0;
	header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE);
	header->addr = cpu_to_be32(reg);
	/* ... */
	if (trans_len > sizeof(br->trans_buf))
		return -ENOMEM;

	data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE);
	/* ... */
	br->trans_len = trans_len;
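/*
 * The request header type is not shown in this listing. From the fields
 * set above (code, rsvd, a big-endian size and a big-endian address) it is
 * presumably a packed header along these lines; field order and packing
 * are assumptions:
 */
struct trans_req_header {
	u8 code;	/* transaction code (read/write variant) */
	u8 rsvd;
	__be16 size;	/* payload size in bytes */
	__be32 addr;	/* target register address */
} __packed;

/*
 * Worked example (illustrative only; the actual transaction code values
 * are not part of this listing): assuming a write code of 0x04, a one-word
 * write of 0x12345678 to register 0x20 would serialize to
 *
 *   04 00 00 04 00 00 00 20	header: code, rsvd, be16 size, be32 addr
 *   78 56 34 12		value, stored as __le32 per the cast above
 */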
/*
 * Convert the transaction layer data (in br->trans_buf) to phy layer data
 * and store it in br->phy_buf. Pad phy_buf so its length is aligned with
 * the SPI BPW, and store the valid phy layer data length in br->phy_len.
 *
 * The phy_buf length should be aligned with the SPI BPW. Spare bytes should
 * be padded with PHY_IDLE, which the slave will simply drop.
 *
 * The padding is not simply appended after the EOP: if it were, it is
 * possible that the slave is fast enough to respond during the padding
 * time, and those rx bytes would be lost while the host is still in the tx
 * phase. So the EOP is moved to the very end of the padded buffer and the
 * hole is filled with PHY_IDLE. Then the slave will not get the entire
 * packet before the tx phase is over, and cannot respond too early.
 */
/* from br_pkt_phy_tx_prepare(): */
	tb = br->trans_buf;
	tb_end = tb + br->trans_len;
	pb = br->phy_buf;
	pb_limit = pb + ARRAY_SIZE(br->phy_buf);
	/* ... */
		if (tb == tb_end - 1 && !pb_eop) {
	/* ... */
			return -ENOMEM;

	/* Store valid phy data length for spi transfer */
	br->phy_len = pb - br->phy_buf;

	if (br->word_len == 1)
	/* ... */
	aligned_phy_len = ALIGN(br->phy_len, br->word_len);
	if (aligned_phy_len > sizeof(br->phy_buf))
		return -ENOMEM;

	if (aligned_phy_len == br->phy_len)
	/* ... */
	move_size = pb - pb_eop;
	memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size);
	/* ... */
	memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len);
	/* ... */
	br->phy_len = aligned_phy_len;
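/*
 * Worked example of the padding above (illustrative, assuming a 4-byte
 * BPW): if the packet layer produced 10 bytes ending with the EOP marker
 * followed by the final data byte, ALIGN(10, 4) gives 12, so two PHY_IDLE
 * bytes are needed. Rather than appending them after the packet, the EOP
 * sequence is moved to the tail and the hole is filled with PHY_IDLE:
 *
 *   before: | b0 b1 b2 b3 | b4 b5 b6 b7 | EOP b9 |
 *   after:  | b0 b1 b2 b3 | b4 b5 b6 b7 | 4a 4a EOP b9 |
 *
 * so the slave only sees the complete packet in the very last SPI word and
 * cannot start responding while the host is still transmitting.
 */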
/*
 * In tx phase, the slave only returns PHY_IDLE (0x4a). So the driver will
 * simply ignore the rx data during the tx phase.
 */
/* from br_do_tx(): */
	/* reorder words for spi transfer */
	if (br->swap_words)
		br->swap_words(br->phy_buf, br->phy_len);

	/* ... */
	return spi_write(br->spi, br->phy_buf, br->phy_len);
/*
 * This function reads the rx byte stream from SPI word by word and converts
 * it to transaction layer data in br->trans_buf. It also stores the length
 * of the rx transaction layer data in br->trans_len.
 *
 * The slave may send an unknown number of PHY_IDLEs in rx phase, so we
 * cannot know the rx length in advance; each word is read and parsed as it
 * arrives.
 */
/* from br_do_rx_and_pkt_phy_parse(): */
	struct device *dev = &br->spi->dev;
	/* ... */
	tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf);
	pb = br->phy_buf;
	/* ... */
		ret = spi_read(br->spi, pb, br->word_len);
	/* ... */
		if (br->swap_words)
			br->swap_words(pb, br->word_len);

		/* parse the rx word byte by byte */
		for (i = 0; i < br->word_len; i++) {
			/* ... */
			/* error out if a non-zero channel number is found */
			/* ... */
				return -EFAULT;
			/* ... */
				tb = br->trans_buf;
			/* ... */
				return -EFAULT;
			/* ... */
				return -EFAULT;
			/* ... */
				return -EFAULT;
			/* ... */
			br->trans_len = tb - br->trans_buf;
	/* ... */
		return -ETIMEDOUT;
	/* ... */
	return -EFAULT;
/* from br_rd_trans_rx_parse(): */
	unsigned int i, trans_len = br->trans_len;
	/* ... */
		return -EFAULT;

	data = (__le32 *)br->trans_buf;
/*
 * For write transactions, the slave will return a transaction response
 * header.
 */
/* from br_wr_trans_rx_parse(): */
	unsigned int trans_len = br->trans_len;
	/* ... */
		return -EFAULT;

	resp = (struct trans_resp_header *)br->trans_buf;

	code = resp->r_code ^ 0x80;
	val_len = be16_to_cpu(resp->size);
	/* ... */
		return -EFAULT;
	/* ... */
		return -EFAULT;
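/*
 * Illustrative response check (transaction code values are assumptions, as
 * in the earlier worked example): for a write request with code 0x04, a
 * successful response appears to carry the request code with its top bit
 * set, i.e. r_code == 0x84, so r_code ^ 0x80 recovers 0x04 and resp->size
 * echoes the number of bytes written.
 */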
/* from do_reg_access(): */
	/* reset the bridge buffer lengths before a new transfer */
	br->trans_len = 0;
	br->phy_len = 0;
/* from regmap_spi_avmm_gather_write(): */
		return -EINVAL;
	/* ... */
		return -EINVAL;

/* from regmap_spi_avmm_write(): */
		return -EINVAL;
	/* ... */
				bytes - SPI_AVMM_REG_SIZE);

/* from regmap_spi_avmm_read(): */
		return -EINVAL;
	/* ... */
		return -EINVAL;
/* from spi_avmm_bridge_ctx_gen(): */
spi_avmm_bridge_ctx_gen(struct spi_device *spi)
{
	if (!spi)
		return ERR_PTR(-ENODEV);

	/* try 32 bits per word first, fall back to 8 if the controller rejects it */
	spi->mode = SPI_MODE_1;
	spi->bits_per_word = 32;
	if (spi_setup(spi)) {
		spi->bits_per_word = 8;
		if (spi_setup(spi))
			return ERR_PTR(-EINVAL);
	}

	/* ... */
		return ERR_PTR(-ENOMEM);

	br->spi = spi;
	br->word_len = spi->bits_per_word / 8;
	if (br->word_len == 4) {
		/* ... */
		br->swap_words = br_swap_words_32;
struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
	/* ... */
	bridge = spi_avmm_bridge_ctx_gen(spi);
	/* ... */
	map = __regmap_init(&spi->dev, &regmap_spi_avmm_bus,
	/* ... */

struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
	/* ... */
	bridge = spi_avmm_bridge_ctx_gen(spi);
	/* ... */
	map = __devm_regmap_init(&spi->dev, &regmap_spi_avmm_bus,
	/* ... */
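/*
 * A minimal usage sketch from a SPI client driver's probe() (the driver
 * name, register offset and regmap_config values are made up for
 * illustration; devm_regmap_init_spi_avmm() is the wrapper macro exposed
 * through <linux/regmap.h>):
 */
static const struct regmap_config example_avmm_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int example_probe(struct spi_device *spi)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	map = devm_regmap_init_spi_avmm(spi, &example_avmm_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* read a (hypothetical) ID register through the bridge */
	ret = regmap_read(map, 0x0, &val);
	if (ret)
		return ret;

	dev_info(&spi->dev, "chip id: 0x%x\n", val);

	return 0;
}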