1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
4  */
5 
6 #ifndef K3_UDMA_GLUE_H_
7 #define K3_UDMA_GLUE_H_
8 
9 #include <linux/types.h>
10 #include <linux/soc/ti/k3-ringacc.h>
11 #include <linux/dma/ti-cppi5.h>
12 
/**
 * struct k3_udma_glue_tx_channel_cfg - UDMA TX channel cfg
 *
 * @tx_cfg:		TX ring configuration
 * @txcq_cfg:		TX completion ring configuration
 * @tx_pause_on_err:	pause TX channel on error
 * @tx_filt_einfo:	filter out Extended Info words from descriptors
 * @tx_filt_pswords:	filter out protocol-specific words from descriptors
 * @tx_supr_tdpkt:	suppress sending of the TX teardown packet
 * @swdata_size:	SW data size in Host PD
 */
struct k3_udma_glue_tx_channel_cfg {
	struct k3_ring_cfg tx_cfg;
	struct k3_ring_cfg txcq_cfg;

	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;
	u32  swdata_size;
};
23 
/* Opaque TX channel handle, allocated by k3_udma_glue_request_tx_chn() */
struct k3_udma_glue_tx_channel;

/* Request and configure the TX channel named @name per @cfg */
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);

/* Release a channel obtained from k3_udma_glue_request_tx_chn() */
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
/* Queue one TX descriptor, given by CPU pointer and its DMA address */
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma);
/* Retrieve the DMA address of a completed descriptor from the TX CQ */
int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma);
/* Enable/disable descriptor processing on the TX channel */
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
/* Tear the TX channel down; @sync presumably waits for teardown completion
 * - confirm against the implementation */
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync);
/* Reset the TX channel, invoking @cleanup for each outstanding descriptor's
 * DMA address; @data is passed through to @cleanup */
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
		void *data, void (*cleanup)(void *data, dma_addr_t desc_dma));
/* Host descriptor size to allocate for this channel */
u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
/* TX completion queue (ring) id */
u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
/* TX completion IRQ number (negative errno convention - TODO confirm) */
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
44 
/*
 * RX source tag low byte selector values for
 * struct k3_udma_glue_rx_flow_cfg::src_tag_lo_sel
 */
enum {
	K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
	K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1,
	K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2,
	K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4,
};
51 
/**
 * struct k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg
 *
 * @rx_cfg:		RX ring configuration
 * @rxfdq_cfg:		RX free Host PD ring configuration
 * @ring_rxq_id:	RX ring id (or -1 for any)
 * @ring_rxfdq0_id:	RX free Host PD ring (FDQ) id (or -1 for any)
 * @rx_error_handling:	Rx Error Handling Mode (0 - drop, 1 - re-try)
 * @src_tag_lo_sel:	Rx Source Tag Low Byte Selector in Host PD
 *			(one of K3_UDMA_GLUE_SRC_TAG_LO_*)
 */
struct k3_udma_glue_rx_flow_cfg {
	struct k3_ring_cfg rx_cfg;
	struct k3_ring_cfg rxfdq_cfg;
	int ring_rxq_id;
	int ring_rxfdq0_id;
	bool rx_error_handling;
	int src_tag_lo_sel;
};
70 
/**
 * struct k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg
 *
 * @swdata_size:	SW Data is present in Host PD of @swdata_size bytes
 * @flow_id_base:	first flow_id used by channel.
 *			if @flow_id_base = -1 - range of GP rflows will be
 *			allocated dynamically.
 * @flow_id_num:	number of RX flows used by channel
 * @flow_id_use_rxchan_id:	use RX channel id as flow id,
 *				used only if @flow_id_num = 1
 * @remote:		indication that RX channel is remote - some remote CPU
 *			core owns and controls the RX channel. Linux Host is
 *			only allowed to attach and configure an RX Flow within
 *			the RX channel. If set - no RX channel operation will
 *			be performed by the K3 NAVSS DMA glue interface.
 * @def_flow_cfg:	default RX flow configuration,
 *			used only if @flow_id_num = 1
 */
struct k3_udma_glue_rx_channel_cfg {
	u32  swdata_size;
	int  flow_id_base;
	int  flow_id_num;
	bool flow_id_use_rxchan_id;
	bool remote;

	struct k3_udma_glue_rx_flow_cfg *def_flow_cfg;
};
98 
99 struct k3_udma_glue_rx_channel;
100 
101 struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
102 		struct device *dev,
103 		const char *name,
104 		struct k3_udma_glue_rx_channel_cfg *cfg);
105 
106 void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
107 int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
108 void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
109 void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
110 			       bool sync);
111 int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
112 		u32 flow_num, struct cppi5_host_desc_t *desc_tx,
113 		dma_addr_t desc_dma);
114 int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
115 		u32 flow_num, dma_addr_t *desc_dma);
116 int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
117 		u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
118 u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
119 				    u32 flow_idx);
120 u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
121 int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
122 			    u32 flow_num);
123 void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
124 			     u32 flow_num);
125 void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
126 		u32 flow_num, void *data,
127 		void (*cleanup)(void *data, dma_addr_t desc_dma),
128 		bool skip_fdq);
129 int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
130 				u32 flow_idx);
131 int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
132 				 u32 flow_idx);
133 
134 #endif /* K3_UDMA_GLUE_H_ */
135