1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  */
5 #include <linux/dma-mapping.h>
6 #include "hal_tx.h"
7 #include "debug.h"
8 #include "hal_desc.h"
9 #include "hif.h"
10 
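/* Entry sizes in the template below are expressed in units of 32-bit words,
 * which is why the descriptor structure sizes are shifted right by 2;
 * max_size uses the same unit (max_entries = max_size / entry_size).
 */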
11 static const struct hal_srng_config hw_srng_config_template[] = {
	/* TODO: max_rings can be populated by querying HW capabilities */
13 	{ /* REO_DST */
14 		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
15 		.max_rings = 4,
16 		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
17 		.lmac_ring = false,
18 		.ring_dir = HAL_SRNG_DIR_DST,
19 		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
20 	},
21 	{ /* REO_EXCEPTION */
22 		/* Designating REO2TCL ring as exception ring. This ring is
23 		 * similar to other REO2SW rings though it is named as REO2TCL.
	 * Any of the REO2SW rings can be used as exception ring.
25 		 */
26 		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
27 		.max_rings = 1,
28 		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
29 		.lmac_ring = false,
30 		.ring_dir = HAL_SRNG_DIR_DST,
31 		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
32 	},
33 	{ /* REO_REINJECT */
34 		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
35 		.max_rings = 1,
36 		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
37 		.lmac_ring = false,
38 		.ring_dir = HAL_SRNG_DIR_SRC,
39 		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
40 	},
41 	{ /* REO_CMD */
42 		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
43 		.max_rings = 1,
44 		.entry_size = (sizeof(struct hal_tlv_hdr) +
45 			sizeof(struct hal_reo_get_queue_stats)) >> 2,
46 		.lmac_ring = false,
47 		.ring_dir = HAL_SRNG_DIR_SRC,
48 		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
49 	},
50 	{ /* REO_STATUS */
51 		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
52 		.max_rings = 1,
53 		.entry_size = (sizeof(struct hal_tlv_hdr) +
54 			sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
55 		.lmac_ring = false,
56 		.ring_dir = HAL_SRNG_DIR_DST,
57 		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
58 	},
59 	{ /* TCL_DATA */
60 		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
61 		.max_rings = 3,
62 		.entry_size = (sizeof(struct hal_tlv_hdr) +
63 			     sizeof(struct hal_tcl_data_cmd)) >> 2,
64 		.lmac_ring = false,
65 		.ring_dir = HAL_SRNG_DIR_SRC,
66 		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
67 	},
68 	{ /* TCL_CMD */
69 		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
70 		.max_rings = 1,
71 		.entry_size = (sizeof(struct hal_tlv_hdr) +
72 			     sizeof(struct hal_tcl_gse_cmd)) >> 2,
		.lmac_ring = false,
74 		.ring_dir = HAL_SRNG_DIR_SRC,
75 		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
76 	},
77 	{ /* TCL_STATUS */
78 		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
79 		.max_rings = 1,
80 		.entry_size = (sizeof(struct hal_tlv_hdr) +
81 			     sizeof(struct hal_tcl_status_ring)) >> 2,
82 		.lmac_ring = false,
83 		.ring_dir = HAL_SRNG_DIR_DST,
84 		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
85 	},
86 	{ /* CE_SRC */
87 		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
88 		.max_rings = 12,
89 		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
90 		.lmac_ring = false,
91 		.ring_dir = HAL_SRNG_DIR_SRC,
92 		.reg_start = {
93 			(HAL_SEQ_WCSS_UMAC_CE0_SRC_REG +
94 			 HAL_CE_DST_RING_BASE_LSB),
95 			HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP,
96 		},
97 		.reg_size = {
98 			(HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
99 			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
100 			(HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
101 			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
102 		},
103 		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
104 	},
105 	{ /* CE_DST */
106 		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
107 		.max_rings = 12,
108 		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
109 		.lmac_ring = false,
110 		.ring_dir = HAL_SRNG_DIR_SRC,
111 		.reg_start = {
112 			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
113 			 HAL_CE_DST_RING_BASE_LSB),
114 			HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP,
115 		},
116 		.reg_size = {
117 			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
118 			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
119 			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
120 			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
121 		},
122 		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
123 	},
124 	{ /* CE_DST_STATUS */
125 		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
126 		.max_rings = 12,
127 		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
128 		.lmac_ring = false,
129 		.ring_dir = HAL_SRNG_DIR_DST,
130 		.reg_start = {
131 			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
132 			 HAL_CE_DST_STATUS_RING_BASE_LSB),
133 			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
134 			 HAL_CE_DST_STATUS_RING_HP),
135 		},
136 		.reg_size = {
137 			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
138 			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
139 			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
140 			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
141 		},
142 		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
143 	},
144 	{ /* WBM_IDLE_LINK */
145 		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
146 		.max_rings = 1,
147 		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
148 		.lmac_ring = false,
149 		.ring_dir = HAL_SRNG_DIR_SRC,
150 		.reg_start = {
151 			(HAL_SEQ_WCSS_UMAC_WBM_REG +
152 			 HAL_WBM_IDLE_LINK_RING_BASE_LSB),
153 			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP),
154 		},
155 		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
156 	},
157 	{ /* SW2WBM_RELEASE */
158 		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
159 		.max_rings = 1,
160 		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
161 		.lmac_ring = false,
162 		.ring_dir = HAL_SRNG_DIR_SRC,
163 		.reg_start = {
164 			(HAL_SEQ_WCSS_UMAC_WBM_REG +
165 			 HAL_WBM_RELEASE_RING_BASE_LSB),
166 			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP),
167 		},
168 		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
169 	},
170 	{ /* WBM2SW_RELEASE */
171 		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
172 		.max_rings = 4,
173 		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
174 		.lmac_ring = false,
175 		.ring_dir = HAL_SRNG_DIR_DST,
176 		.reg_start = {
177 			(HAL_SEQ_WCSS_UMAC_WBM_REG +
178 			 HAL_WBM0_RELEASE_RING_BASE_LSB),
179 			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP),
180 		},
181 		.reg_size = {
182 			(HAL_WBM1_RELEASE_RING_BASE_LSB -
183 			 HAL_WBM0_RELEASE_RING_BASE_LSB),
184 			(HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP),
185 		},
186 		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
187 	},
188 	{ /* RXDMA_BUF */
189 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
190 		.max_rings = 2,
191 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
192 		.lmac_ring = true,
193 		.ring_dir = HAL_SRNG_DIR_SRC,
194 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
195 	},
196 	{ /* RXDMA_DST */
197 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
198 		.max_rings = 1,
199 		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
200 		.lmac_ring = true,
201 		.ring_dir = HAL_SRNG_DIR_DST,
202 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
203 	},
204 	{ /* RXDMA_MONITOR_BUF */
205 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
206 		.max_rings = 1,
207 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
208 		.lmac_ring = true,
209 		.ring_dir = HAL_SRNG_DIR_SRC,
210 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
211 	},
212 	{ /* RXDMA_MONITOR_STATUS */
213 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
214 		.max_rings = 1,
215 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
216 		.lmac_ring = true,
217 		.ring_dir = HAL_SRNG_DIR_SRC,
218 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
219 	},
220 	{ /* RXDMA_MONITOR_DST */
221 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
222 		.max_rings = 1,
223 		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
224 		.lmac_ring = true,
225 		.ring_dir = HAL_SRNG_DIR_DST,
226 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
227 	},
228 	{ /* RXDMA_MONITOR_DESC */
229 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
230 		.max_rings = 1,
231 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
232 		.lmac_ring = true,
233 		.ring_dir = HAL_SRNG_DIR_SRC,
234 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
235 	},
236 	{ /* RXDMA DIR BUF */
237 		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
238 		.max_rings = 1,
239 		.entry_size = 8 >> 2, /* TODO: Define the struct */
240 		.lmac_ring = true,
241 		.ring_dir = HAL_SRNG_DIR_SRC,
242 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
243 	},
244 };
245 
static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
247 {
248 	struct ath11k_hal *hal = &ab->hal;
249 	size_t size;
250 
251 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
252 	hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
253 					    GFP_KERNEL);
254 	if (!hal->rdp.vaddr)
255 		return -ENOMEM;
256 
257 	return 0;
258 }
259 
static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
261 {
262 	struct ath11k_hal *hal = &ab->hal;
263 	size_t size;
264 
265 	if (!hal->rdp.vaddr)
266 		return;
267 
268 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
269 	dma_free_coherent(ab->dev, size,
270 			  hal->rdp.vaddr, hal->rdp.paddr);
271 	hal->rdp.vaddr = NULL;
272 }
273 
static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
275 {
276 	struct ath11k_hal *hal = &ab->hal;
277 	size_t size;
278 
279 	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
280 	hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
281 					    GFP_KERNEL);
282 	if (!hal->wrp.vaddr)
283 		return -ENOMEM;
284 
285 	return 0;
286 }
287 
static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
289 {
290 	struct ath11k_hal *hal = &ab->hal;
291 	size_t size;
292 
293 	if (!hal->wrp.vaddr)
294 		return;
295 
296 	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
297 	dma_free_coherent(ab->dev, size,
298 			  hal->wrp.vaddr, hal->wrp.paddr);
299 	hal->wrp.vaddr = NULL;
300 }
301 
static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
303 				    struct hal_srng *srng, int ring_num)
304 {
305 	struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
306 	u32 addr;
307 	u32 val;
308 
309 	addr = HAL_CE_DST_RING_CTRL +
310 	       srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
311 	       ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
312 
313 	val = ath11k_hif_read32(ab, addr);
314 	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
315 	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
316 			  srng->u.dst_ring.max_buffer_length);
317 	ath11k_hif_write32(ab, addr, val);
318 }
319 
static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
321 					struct hal_srng *srng)
322 {
323 	struct ath11k_hal *hal = &ab->hal;
324 	u32 val;
325 	u64 hp_addr;
326 	u32 reg_base;
327 
328 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
329 
330 	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
331 		ath11k_hif_write32(ab, reg_base +
332 				   HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
333 				   srng->msi_addr);
334 
335 		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
336 				 ((u64)srng->msi_addr >>
337 				  HAL_ADDR_MSB_REG_SHIFT)) |
338 		      HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
339 		ath11k_hif_write32(ab, reg_base +
340 				       HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);
341 
342 		ath11k_hif_write32(ab,
343 				   reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
344 				   srng->msi_data);
345 	}
346 
347 	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
348 
349 	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
350 			 ((u64)srng->ring_base_paddr >>
351 			  HAL_ADDR_MSB_REG_SHIFT)) |
352 	      FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
353 			 (srng->entry_size * srng->num_entries));
354 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);
355 
356 	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
357 	      FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
358 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);
359 
360 	/* interrupt setup */
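	/* The producer interrupt timer threshold field presumably takes units
	 * of 8 usecs, hence the ">> 3" conversion from microseconds below.
	 */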
361 	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
362 			 (srng->intr_timer_thres_us >> 3));
363 
364 	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
365 			  (srng->intr_batch_cntr_thres_entries *
366 			   srng->entry_size));
367 
368 	ath11k_hif_write32(ab,
369 			   reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
370 			   val);
371 
372 	hp_addr = hal->rdp.paddr +
373 		  ((unsigned long)srng->u.dst_ring.hp_addr -
374 		   (unsigned long)hal->rdp.vaddr);
375 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
376 			   hp_addr & HAL_ADDR_LSB_REG_MASK);
377 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
378 			   hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
379 
380 	/* Initialize head and tail pointers to indicate ring is empty */
381 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
382 	ath11k_hif_write32(ab, reg_base, 0);
383 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
384 	*srng->u.dst_ring.hp_addr = 0;
385 
386 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
387 	val = 0;
388 	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
389 		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
390 	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
391 		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
392 	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
393 		val |= HAL_REO1_RING_MISC_MSI_SWAP;
394 	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
395 
396 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
397 }
398 
static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
400 					struct hal_srng *srng)
401 {
402 	struct ath11k_hal *hal = &ab->hal;
403 	u32 val;
404 	u64 tp_addr;
405 	u32 reg_base;
406 
407 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
408 
409 	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
410 		ath11k_hif_write32(ab, reg_base +
411 				   HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
412 				   srng->msi_addr);
413 
414 		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
415 				 ((u64)srng->msi_addr >>
416 				  HAL_ADDR_MSB_REG_SHIFT)) |
417 		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
418 		ath11k_hif_write32(ab, reg_base +
419 				       HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
420 				   val);
421 
422 		ath11k_hif_write32(ab, reg_base +
423 				       HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
424 				   srng->msi_data);
425 	}
426 
427 	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
428 
429 	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
430 			 ((u64)srng->ring_base_paddr >>
431 			  HAL_ADDR_MSB_REG_SHIFT)) |
432 	      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
433 			 (srng->entry_size * srng->num_entries));
434 	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
435 
436 	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
437 	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
438 
439 	/* interrupt setup */
440 	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
441 	 * unit of 8 usecs instead of 1 usec (as required by v1).
442 	 */
443 	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
444 			 srng->intr_timer_thres_us);
445 
446 	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
447 			  (srng->intr_batch_cntr_thres_entries *
448 			   srng->entry_size));
449 
450 	ath11k_hif_write32(ab,
451 			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
452 			   val);
453 
454 	val = 0;
455 	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
456 		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
457 				  srng->u.src_ring.low_threshold);
458 	}
459 	ath11k_hif_write32(ab,
460 			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
461 			   val);
462 
463 	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
464 		tp_addr = hal->rdp.paddr +
465 			  ((unsigned long)srng->u.src_ring.tp_addr -
466 			   (unsigned long)hal->rdp.vaddr);
467 		ath11k_hif_write32(ab,
468 				   reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
469 				   tp_addr & HAL_ADDR_LSB_REG_MASK);
470 		ath11k_hif_write32(ab,
471 				   reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
472 				   tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
473 	}
474 
475 	/* Initialize head and tail pointers to indicate ring is empty */
476 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
477 	ath11k_hif_write32(ab, reg_base, 0);
478 	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
479 	*srng->u.src_ring.tp_addr = 0;
480 
481 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
482 	val = 0;
483 	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
484 		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
485 	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
486 		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
487 	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
488 		val |= HAL_TCL1_RING_MISC_MSI_SWAP;
489 
490 	/* Loop count is not used for SRC rings */
491 	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
492 
493 	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
494 
495 	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
496 }
497 
static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
499 				    struct hal_srng *srng)
500 {
501 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
502 		ath11k_hal_srng_src_hw_init(ab, srng);
503 	else
504 		ath11k_hal_srng_dst_hw_init(ab, srng);
505 }
506 
static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
508 				       enum hal_ring_type type,
509 				       int ring_num, int mac_id)
510 {
511 	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
512 	int ring_id;
513 
514 	if (ring_num >= srng_config->max_rings) {
		ath11k_warn(ab, "invalid ring number: %d\n", ring_num);
516 		return -EINVAL;
517 	}
518 
519 	ring_id = srng_config->start_ring_id + ring_num;
520 	if (srng_config->lmac_ring)
521 		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
522 
523 	if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
524 		return -EINVAL;
525 
526 	return ring_id;
527 }
528 
int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
530 {
531 	struct hal_srng_config *srng_config;
532 
533 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
534 		return -EINVAL;
535 
536 	srng_config = &ab->hal.srng_config[ring_type];
537 
538 	return (srng_config->entry_size << 2);
539 }
540 
int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
542 {
543 	struct hal_srng_config *srng_config;
544 
545 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
546 		return -EINVAL;
547 
548 	srng_config = &ab->hal.srng_config[ring_type];
549 
550 	return (srng_config->max_size / srng_config->entry_size);
551 }
552 
void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
554 				struct hal_srng_params *params)
555 {
556 	params->ring_base_paddr = srng->ring_base_paddr;
557 	params->ring_base_vaddr = srng->ring_base_vaddr;
558 	params->num_entries = srng->num_entries;
559 	params->intr_timer_thres_us = srng->intr_timer_thres_us;
560 	params->intr_batch_cntr_thres_entries =
561 		srng->intr_batch_cntr_thres_entries;
562 	params->low_threshold = srng->u.src_ring.low_threshold;
563 	params->msi_addr = srng->msi_addr;
564 	params->msi_data = srng->msi_data;
565 	params->flags = srng->flags;
566 }
567 
dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
569 				       struct hal_srng *srng)
570 {
571 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
572 		return 0;
573 
574 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
575 		return ab->hal.wrp.paddr +
576 		       ((unsigned long)srng->u.src_ring.hp_addr -
577 			(unsigned long)ab->hal.wrp.vaddr);
578 	else
579 		return ab->hal.rdp.paddr +
580 		       ((unsigned long)srng->u.dst_ring.hp_addr -
581 			 (unsigned long)ab->hal.rdp.vaddr);
582 }
583 
dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
585 				       struct hal_srng *srng)
586 {
587 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
588 		return 0;
589 
590 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
591 		return ab->hal.rdp.paddr +
592 		       ((unsigned long)srng->u.src_ring.tp_addr -
593 			(unsigned long)ab->hal.rdp.vaddr);
594 	else
595 		return ab->hal.wrp.paddr +
596 		       ((unsigned long)srng->u.dst_ring.tp_addr -
597 			(unsigned long)ab->hal.wrp.vaddr);
598 }
599 
u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
601 {
602 	switch (type) {
603 	case HAL_CE_DESC_SRC:
604 		return sizeof(struct hal_ce_srng_src_desc);
605 	case HAL_CE_DESC_DST:
606 		return sizeof(struct hal_ce_srng_dest_desc);
607 	case HAL_CE_DESC_DST_STATUS:
608 		return sizeof(struct hal_ce_srng_dst_status_desc);
609 	}
610 
611 	return 0;
612 }
613 
void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
615 				u8 byte_swap_data)
616 {
617 	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
618 
619 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
620 	desc->buffer_addr_info =
621 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
622 			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
623 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
624 			   byte_swap_data) |
625 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
626 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
627 	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
628 }
629 
void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
631 {
632 	struct hal_ce_srng_dest_desc *desc =
633 		(struct hal_ce_srng_dest_desc *)buf;
634 
635 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
636 	desc->buffer_addr_info =
637 		FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
638 			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
639 }
640 
u32 ath11k_hal_ce_dst_status_get_length(void *buf)
642 {
643 	struct hal_ce_srng_dst_status_desc *desc =
644 		(struct hal_ce_srng_dst_status_desc *)buf;
645 	u32 len;
646 
647 	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
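	/* Clear the length field after reading it, presumably so a stale
	 * status descriptor is not mistaken for a freshly completed one the
	 * next time this ring entry is examined.
	 */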
648 	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
649 
650 	return len;
651 }
652 
void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
654 				   dma_addr_t paddr)
655 {
656 	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
657 					       (paddr & HAL_ADDR_LSB_REG_MASK));
658 	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
659 					       ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
660 				    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
661 				    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
662 }
663 
u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
665 {
666 	lockdep_assert_held(&srng->lock);
667 
668 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
669 		return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
670 
671 	return NULL;
672 }
673 
u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
675 					struct hal_srng *srng)
676 {
677 	u32 *desc;
678 
679 	lockdep_assert_held(&srng->lock);
680 
681 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
682 		return NULL;
683 
684 	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
685 
686 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
687 			      srng->ring_size;
688 
689 	return desc;
690 }
691 
int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
693 				 bool sync_hw_ptr)
694 {
695 	u32 tp, hp;
696 
697 	lockdep_assert_held(&srng->lock);
698 
699 	tp = srng->u.dst_ring.tp;
700 
701 	if (sync_hw_ptr) {
702 		hp = *srng->u.dst_ring.hp_addr;
703 		srng->u.dst_ring.cached_hp = hp;
704 	} else {
705 		hp = srng->u.dst_ring.cached_hp;
706 	}
707 
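	/* hp and tp are offsets in 32-bit words; the else branch handles the
	 * case where the head pointer has wrapped around past the tail.
	 */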
708 	if (hp >= tp)
709 		return (hp - tp) / srng->entry_size;
710 	else
711 		return (srng->ring_size - tp + hp) / srng->entry_size;
712 }
713 
714 /* Returns number of available entries in src ring */
int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
716 				 bool sync_hw_ptr)
717 {
718 	u32 tp, hp;
719 
720 	lockdep_assert_held(&srng->lock);
721 
722 	hp = srng->u.src_ring.hp;
723 
724 	if (sync_hw_ptr) {
725 		tp = *srng->u.src_ring.tp_addr;
726 		srng->u.src_ring.cached_tp = tp;
727 	} else {
728 		tp = srng->u.src_ring.cached_tp;
729 	}
730 
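	/* One entry is always left unused so that a completely full ring can
	 * be distinguished from an empty one, hence the "- 1" below.
	 */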
731 	if (tp > hp)
732 		return ((tp - hp) / srng->entry_size) - 1;
733 	else
734 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
735 }
736 
u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
738 					struct hal_srng *srng)
739 {
740 	u32 *desc;
741 	u32 next_hp;
742 
743 	lockdep_assert_held(&srng->lock);
744 
745 	/* TODO: Using % is expensive, but we have to do this since size of some
746 	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
747 	 * if separate function is defined for rings having power of 2 ring size
748 	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
749 	 * overhead of % by using mask (with &).
750 	 */
751 	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
752 
753 	if (next_hp == srng->u.src_ring.cached_tp)
754 		return NULL;
755 
756 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
757 	srng->u.src_ring.hp = next_hp;
758 
759 	/* TODO: Reap functionality is not used by all rings. If particular
760 	 * ring does not use reap functionality, we need not update reap_hp
761 	 * with next_hp pointer. Need to make sure a separate function is used
762 	 * before doing any optimization by removing below code updating
763 	 * reap_hp.
764 	 */
765 	srng->u.src_ring.reap_hp = next_hp;
766 
767 	return desc;
768 }
769 
u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
771 				   struct hal_srng *srng)
772 {
773 	u32 *desc;
774 	u32 next_reap_hp;
775 
776 	lockdep_assert_held(&srng->lock);
777 
778 	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
779 		       srng->ring_size;
780 
781 	if (next_reap_hp == srng->u.src_ring.cached_tp)
782 		return NULL;
783 
784 	desc = srng->ring_base_vaddr + next_reap_hp;
785 	srng->u.src_ring.reap_hp = next_reap_hp;
786 
787 	return desc;
788 }
789 
u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
791 					 struct hal_srng *srng)
792 {
793 	u32 *desc;
794 
795 	lockdep_assert_held(&srng->lock);
796 
797 	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
798 		return NULL;
799 
800 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
801 	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
802 			      srng->ring_size;
803 
804 	return desc;
805 }
806 
u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
808 {
809 	lockdep_assert_held(&srng->lock);
810 
811 	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
812 	    srng->u.src_ring.cached_tp)
813 		return NULL;
814 
815 	return srng->ring_base_vaddr + srng->u.src_ring.hp;
816 }
817 
void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
819 {
820 	lockdep_assert_held(&srng->lock);
821 
822 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
823 		srng->u.src_ring.cached_tp =
824 			*(volatile u32 *)srng->u.src_ring.tp_addr;
825 	else
826 		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
827 }
828 
829 /* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
830  * should have been called before this.
831  */
void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
833 {
834 	lockdep_assert_held(&srng->lock);
835 
836 	/* TODO: See if we need a write memory barrier here */
837 	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
838 		/* For LMAC rings, ring pointer updates are done through FW and
839 		 * hence written to a shared memory location that is read by FW
840 		 */
841 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
842 			srng->u.src_ring.last_tp =
843 				*(volatile u32 *)srng->u.src_ring.tp_addr;
844 			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
845 		} else {
846 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
847 			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
848 		}
849 	} else {
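		/* For non-LMAC rings, hp_addr/tp_addr point into the mapped
		 * register space, so the offset from ab->mem is the register
		 * offset to write through the HIF layer.
		 */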
850 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
851 			srng->u.src_ring.last_tp =
852 				*(volatile u32 *)srng->u.src_ring.tp_addr;
853 			ath11k_hif_write32(ab,
854 					   (unsigned long)srng->u.src_ring.hp_addr -
855 					   (unsigned long)ab->mem,
856 					   srng->u.src_ring.hp);
857 		} else {
858 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
859 			ath11k_hif_write32(ab,
860 					   (unsigned long)srng->u.dst_ring.tp_addr -
861 					   (unsigned long)ab->mem,
862 					   srng->u.dst_ring.tp);
863 		}
864 	}
865 
866 	srng->timestamp = jiffies;
867 }
868 
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
870 				     struct hal_wbm_idle_scatter_list *sbuf,
871 				     u32 nsbufs, u32 tot_link_desc,
872 				     u32 end_offset)
873 {
874 	struct ath11k_buffer_addr *link_addr;
875 	int i;
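	/* The scatter buffer size register field is presumably programmed in
	 * units of 64 bytes, hence the division below.
	 */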
876 	u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
877 
878 	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
879 
880 	for (i = 1; i < nsbufs; i++) {
881 		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
882 		link_addr->info1 = FIELD_PREP(
883 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
884 				(u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
885 				FIELD_PREP(
886 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
887 				BASE_ADDR_MATCH_TAG_VAL);
888 
889 		link_addr = (void *)sbuf[i].vaddr +
890 			     HAL_WBM_IDLE_SCATTER_BUF_SIZE;
891 	}
892 
893 	ath11k_hif_write32(ab,
894 			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
895 			   FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
896 			   FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
897 	ath11k_hif_write32(ab,
898 			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
899 			   FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
900 				      reg_scatter_buf_sz * nsbufs));
901 	ath11k_hif_write32(ab,
902 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
903 			   HAL_WBM_SCATTERED_RING_BASE_LSB,
904 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
905 				      sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
906 	ath11k_hif_write32(ab,
907 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
908 			   HAL_WBM_SCATTERED_RING_BASE_MSB,
909 			   FIELD_PREP(
910 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
911 				(u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
912 				FIELD_PREP(
913 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
914 				BASE_ADDR_MATCH_TAG_VAL));
915 
916 	/* Setup head and tail pointers for the idle list */
917 	ath11k_hif_write32(ab,
918 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
919 			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
920 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
921 				      sbuf[nsbufs - 1].paddr));
922 	ath11k_hif_write32(ab,
923 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
924 			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
925 			   FIELD_PREP(
926 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
927 				((u64)sbuf[nsbufs - 1].paddr >>
928 				 HAL_ADDR_MSB_REG_SHIFT)) |
929 			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
930 				      (end_offset >> 2)));
931 	ath11k_hif_write32(ab,
932 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
933 			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
934 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
935 				      sbuf[0].paddr));
936 
937 	ath11k_hif_write32(ab,
938 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
939 			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
940 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
941 				      sbuf[0].paddr));
942 	ath11k_hif_write32(ab,
943 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
944 			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
945 			   FIELD_PREP(
946 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
947 				((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
948 			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
949 				      0));
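	/* Each idle link descriptor pointer occupies two 32-bit words in the
	 * scatter buffers, so the head pointer is presumably expressed as
	 * 2 * tot_link_desc.
	 */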
950 	ath11k_hif_write32(ab,
951 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
952 			   HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
953 			   2 * tot_link_desc);
954 
955 	/* Enable the SRNG */
956 	ath11k_hif_write32(ab,
957 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
958 			   HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
959 }
960 
int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
962 			  int ring_num, int mac_id,
963 			  struct hal_srng_params *params)
964 {
965 	struct ath11k_hal *hal = &ab->hal;
966 	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
967 	struct hal_srng *srng;
968 	int ring_id;
969 	u32 lmac_idx;
970 	int i;
971 	u32 reg_base;
972 
973 	ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
974 	if (ring_id < 0)
975 		return ring_id;
976 
977 	srng = &hal->srng_list[ring_id];
978 
979 	srng->ring_id = ring_id;
980 	srng->ring_dir = srng_config->ring_dir;
981 	srng->ring_base_paddr = params->ring_base_paddr;
982 	srng->ring_base_vaddr = params->ring_base_vaddr;
983 	srng->entry_size = srng_config->entry_size;
984 	srng->num_entries = params->num_entries;
985 	srng->ring_size = srng->entry_size * srng->num_entries;
986 	srng->intr_batch_cntr_thres_entries =
987 				params->intr_batch_cntr_thres_entries;
988 	srng->intr_timer_thres_us = params->intr_timer_thres_us;
989 	srng->flags = params->flags;
990 	srng->msi_addr = params->msi_addr;
991 	srng->msi_data = params->msi_data;
992 	srng->initialized = 1;
993 	spin_lock_init(&srng->lock);
994 
995 	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
996 		srng->hwreg_base[i] = srng_config->reg_start[i] +
997 				      (ring_num * srng_config->reg_size[i]);
998 	}
999 
1000 	memset(srng->ring_base_vaddr, 0,
1001 	       (srng->entry_size * srng->num_entries) << 2);
1002 
1003 	/* TODO: Add comments on these swap configurations */
1004 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1005 		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
1006 			       HAL_SRNG_FLAGS_RING_PTR_SWAP;
1007 
1008 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
1009 
1010 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
1011 		srng->u.src_ring.hp = 0;
1012 		srng->u.src_ring.cached_tp = 0;
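		/* Start reap_hp one entry behind hp (i.e. at the last entry
		 * of the ring) so that the first reap advances it to entry 0.
		 */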
1013 		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
1014 		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
1015 		srng->u.src_ring.low_threshold = params->low_threshold *
1016 						 srng->entry_size;
1017 		if (srng_config->lmac_ring) {
1018 			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1019 			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
1020 						   lmac_idx);
1021 			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1022 		} else {
1023 			if (!ab->hw_params.supports_shadow_regs)
1024 				srng->u.src_ring.hp_addr =
1025 				(u32 *)((unsigned long)ab->mem + reg_base);
1026 			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
1028 					   "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
1029 					   type, ring_num,
1030 					   reg_base,
1031 					   (unsigned long)srng->u.src_ring.hp_addr -
1032 					   (unsigned long)ab->mem);
1033 		}
1034 	} else {
1035 		/* During initialization loop count in all the descriptors
1036 		 * will be set to zero, and HW will set it to 1 on completing
1037 		 * descriptor update in first loop, and increments it by 1 on
1038 		 * subsequent loops (loop count wraps around after reaching
1039 		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
1040 		 * loop count in descriptors updated by HW (to be processed
1041 		 * by SW).
1042 		 */
1043 		srng->u.dst_ring.loop_cnt = 1;
1044 		srng->u.dst_ring.tp = 0;
1045 		srng->u.dst_ring.cached_hp = 0;
1046 		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
1047 		if (srng_config->lmac_ring) {
1048 			/* For LMAC rings, tail pointer updates will be done
1049 			 * through FW by writing to a shared memory location
1050 			 */
1051 			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1052 			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
1053 						   lmac_idx);
1054 			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1055 		} else {
1056 			if (!ab->hw_params.supports_shadow_regs)
1057 				srng->u.dst_ring.tp_addr =
1058 				(u32 *)((unsigned long)ab->mem + reg_base +
1059 					(HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
1060 			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
1062 					   "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
1063 					   type, ring_num,
1064 					   reg_base + (HAL_REO1_RING_TP(ab) -
1065 						       HAL_REO1_RING_HP(ab)),
1066 					   (unsigned long)srng->u.dst_ring.tp_addr -
1067 					   (unsigned long)ab->mem);
1068 		}
1069 	}
1070 
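	/* LMAC rings are not initialized by the host; their HW setup is
	 * expected to be done by the firmware, so skip the register init.
	 */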
1071 	if (srng_config->lmac_ring)
1072 		return ring_id;
1073 
1074 	ath11k_hal_srng_hw_init(ab, srng);
1075 
1076 	if (type == HAL_CE_DST) {
1077 		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
1078 		ath11k_hal_ce_dst_setup(ab, srng, ring_num);
1079 	}
1080 
1081 	return ring_id;
1082 }
1083 
static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
1085 					      int shadow_cfg_idx,
1086 					  enum hal_ring_type ring_type,
1087 					  int ring_num)
1088 {
1089 	struct hal_srng *srng;
1090 	struct ath11k_hal *hal = &ab->hal;
1091 	int ring_id;
1092 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1093 
1094 	ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
1095 	if (ring_id < 0)
1096 		return;
1097 
1098 	srng = &hal->srng_list[ring_id];
1099 
1100 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
1101 		srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
1102 						   (unsigned long)ab->mem);
1103 	else
1104 		srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
1105 						   (unsigned long)ab->mem);
1106 }
1107 
int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
1109 					 enum hal_ring_type ring_type,
1110 					 int ring_num)
1111 {
1112 	struct ath11k_hal *hal = &ab->hal;
1113 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1114 	int shadow_cfg_idx = hal->num_shadow_reg_configured;
1115 	u32 target_reg;
1116 
1117 	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
1118 		return -EINVAL;
1119 
1120 	hal->num_shadow_reg_configured++;
1121 
1122 	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
1123 	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
1124 		ring_num;
1125 
1126 	/* For destination ring, shadow the TP */
1127 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
1128 		target_reg += HAL_OFFSET_FROM_HP_TO_TP;
1129 
1130 	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
1131 
	/* update hp/tp addr in the hal structure */
1133 	ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
1134 					  ring_num);
1135 
	ath11k_dbg(ab, ATH11K_DBG_HAL,
1137 		   "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
1138 		  target_reg,
1139 		  HAL_SHADOW_REG(shadow_cfg_idx),
1140 		  shadow_cfg_idx,
1141 		  ring_type, ring_num);
1142 
1143 	return 0;
1144 }
1145 
void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
1147 {
1148 	struct ath11k_hal *hal = &ab->hal;
1149 	int ring_type, ring_num;
1150 
1151 	/* update all the non-CE srngs. */
1152 	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
1153 		struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1154 
1155 		if (ring_type == HAL_CE_SRC ||
1156 		    ring_type == HAL_CE_DST ||
1157 			ring_type == HAL_CE_DST_STATUS)
1158 			continue;
1159 
1160 		if (srng_config->lmac_ring)
1161 			continue;
1162 
1163 		for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
1164 			ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
1165 	}
1166 }
1167 
void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
1169 				       u32 **cfg, u32 *len)
1170 {
1171 	struct ath11k_hal *hal = &ab->hal;
1172 
1173 	*len = hal->num_shadow_reg_configured;
1174 	*cfg = hal->shadow_reg_addr;
1175 }
1176 
void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
1178 					 struct hal_srng *srng)
1179 {
1180 	lockdep_assert_held(&srng->lock);
1181 
	/* Check whether the ring is empty. Update the shadow
	 * HP only when the ring isn't empty.
	 */
1185 	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
1186 	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
1187 		ath11k_hal_srng_access_end(ab, srng);
1188 }
1189 
static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
1191 {
1192 	struct ath11k_hal *hal = &ab->hal;
1193 	struct hal_srng_config *s;
1194 
1195 	hal->srng_config = kmemdup(hw_srng_config_template,
1196 				   sizeof(hw_srng_config_template),
1197 				   GFP_KERNEL);
1198 	if (!hal->srng_config)
1199 		return -ENOMEM;
1200 
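	/* The CE and WBM register offsets are fixed in the template above;
	 * the REO and TCL offsets differ between hw revisions (note the (ab)
	 * parameter), so they are filled in here from the per-hw register map.
	 */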
1201 	s = &hal->srng_config[HAL_REO_DST];
1202 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
1203 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
1204 	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
1205 	s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);
1206 
1207 	s = &hal->srng_config[HAL_REO_EXCEPTION];
1208 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
1209 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
1210 
1211 	s = &hal->srng_config[HAL_REO_REINJECT];
1212 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB;
1213 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
1214 
1215 	s = &hal->srng_config[HAL_REO_CMD];
1216 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB;
1217 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
1218 
1219 	s = &hal->srng_config[HAL_REO_STATUS];
1220 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
1221 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);
1222 
1223 	s = &hal->srng_config[HAL_TCL_DATA];
1224 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
1225 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
1226 	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
1227 	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
1228 
1229 	s = &hal->srng_config[HAL_TCL_CMD];
1230 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
1231 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
1232 
1233 	s = &hal->srng_config[HAL_TCL_STATUS];
1234 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
1235 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
1236 
1237 	return 0;
1238 }
1239 
int ath11k_hal_srng_init(struct ath11k_base *ab)
1241 {
1242 	struct ath11k_hal *hal = &ab->hal;
1243 	int ret;
1244 
1245 	memset(hal, 0, sizeof(*hal));
1246 
1247 	ret = ath11k_hal_srng_create_config(ab);
1248 	if (ret)
1249 		goto err_hal;
1250 
1251 	ret = ath11k_hal_alloc_cont_rdp(ab);
1252 	if (ret)
1253 		goto err_hal;
1254 
1255 	ret = ath11k_hal_alloc_cont_wrp(ab);
1256 	if (ret)
1257 		goto err_free_cont_rdp;
1258 
1259 	return 0;
1260 
1261 err_free_cont_rdp:
1262 	ath11k_hal_free_cont_rdp(ab);
1263 
1264 err_hal:
1265 	return ret;
1266 }
1267 EXPORT_SYMBOL(ath11k_hal_srng_init);
1268 
void ath11k_hal_srng_deinit(struct ath11k_base *ab)
1270 {
1271 	struct ath11k_hal *hal = &ab->hal;
1272 
1273 	ath11k_hal_free_cont_rdp(ab);
1274 	ath11k_hal_free_cont_wrp(ab);
1275 	kfree(hal->srng_config);
1276 }
1277 EXPORT_SYMBOL(ath11k_hal_srng_deinit);
1278 
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
1280 {
1281 	struct hal_srng *srng;
1282 	struct ath11k_ext_irq_grp *irq_grp;
1283 	struct ath11k_ce_pipe *ce_pipe;
1284 	int i;
1285 
1286 	ath11k_err(ab, "Last interrupt received for each CE:\n");
1287 	for (i = 0; i < ab->hw_params.ce_count; i++) {
1288 		ce_pipe = &ab->ce.ce_pipe[i];
1289 
1290 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
1291 			continue;
1292 
1293 		ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
1294 			   i, ce_pipe->pipe_num,
1295 			   jiffies_to_msecs(jiffies - ce_pipe->timestamp));
1296 	}
1297 
1298 	ath11k_err(ab, "\nLast interrupt received for each group:\n");
1299 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
1300 		irq_grp = &ab->ext_irq_grp[i];
1301 		ath11k_err(ab, "group_id %d %ums before\n",
1302 			   irq_grp->grp_id,
1303 			   jiffies_to_msecs(jiffies - irq_grp->timestamp));
1304 	}
1305 
1306 	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
1307 		srng = &ab->hal.srng_list[i];
1308 
1309 		if (!srng->initialized)
1310 			continue;
1311 
1312 		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
1313 			ath11k_err(ab,
1314 				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
1315 				   srng->ring_id, srng->u.src_ring.hp,
1316 				   srng->u.src_ring.reap_hp,
1317 				   *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
1318 				   srng->u.src_ring.last_tp,
1319 				   jiffies_to_msecs(jiffies - srng->timestamp));
1320 		else if (srng->ring_dir == HAL_SRNG_DIR_DST)
1321 			ath11k_err(ab,
1322 				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
1323 				   srng->ring_id, srng->u.dst_ring.tp,
1324 				   *srng->u.dst_ring.hp_addr,
1325 				   srng->u.dst_ring.cached_hp,
1326 				   srng->u.dst_ring.last_hp,
1327 				   jiffies_to_msecs(jiffies - srng->timestamp));
1328 	}
1329 }
1330