1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2018 The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/of.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12 #include <linux/regulator/consumer.h>
13 
14 #include "ce.h"
15 #include "debug.h"
16 #include "hif.h"
17 #include "htc.h"
18 #include "snoc.h"
19 
20 #define ATH10K_SNOC_RX_POST_RETRY_MS 50
21 #define CE_POLL_PIPE 4
22 #define ATH10K_SNOC_WAKE_IRQ 2
23 
/* Human-readable names for the WCN3990 copy engines, indexed by CE id
 * (0..11). Presumably used as the per-CE interrupt names when the IRQs
 * are requested — confirm against the irq-request caller.
 */
static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
38 
/* Voltage regulators required by the WCN3990. Positional fields appear
 * to be: regulator handle (filled at runtime), DT supply name, min uV,
 * max uV, plus two numeric fields and a flag — NOTE(review): field
 * meanings inferred from the values; confirm against
 * struct ath10k_vreg_info in snoc.h.
 */
static struct ath10k_vreg_info vreg_cfg[] = {
	{NULL, "vdd-0.8-cx-mx", 800000, 850000, 0, 0, false},
	{NULL, "vdd-1.8-xo", 1800000, 1850000, 0, 0, false},
	{NULL, "vdd-1.3-rfa", 1300000, 1350000, 0, 0, false},
	{NULL, "vdd-3.3-ch0", 3300000, 3350000, 0, 0, false},
};
45 
/* Clocks required by the WCN3990 — handle filled at runtime, DT clock
 * name, then a rate/flag pair (NOTE(review): confirm field meanings
 * against struct ath10k_clk_info in snoc.h).
 */
static struct ath10k_clk_info clk_cfg[] = {
	{NULL, "cxo_ref_clk_pin", 0, false},
};
49 
50 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
51 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
52 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
53 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
54 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
55 static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
56 
/* Per-device match data for the WCN3990: hardware revision, the 35-bit
 * DMA addressing capability, and a 1 MiB MSA (firmware memory) region.
 */
static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(35),
	.msa_size = 0x100000,
};
62 
63 #define WCN3990_SRC_WR_IDX_OFFSET 0x3C
64 #define WCN3990_DST_WR_IDX_OFFSET 0x40
65 
66 static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
67 		{
68 			.ce_id = __cpu_to_le16(0),
69 			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
70 		},
71 
72 		{
73 			.ce_id = __cpu_to_le16(3),
74 			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
75 		},
76 
77 		{
78 			.ce_id = __cpu_to_le16(4),
79 			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
80 		},
81 
82 		{
83 			.ce_id = __cpu_to_le16(5),
84 			.reg_offset =  __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
85 		},
86 
87 		{
88 			.ce_id = __cpu_to_le16(7),
89 			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
90 		},
91 
92 		{
93 			.ce_id = __cpu_to_le16(1),
94 			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
95 		},
96 
97 		{
98 			.ce_id = __cpu_to_le16(2),
99 			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
100 		},
101 
102 		{
103 			.ce_id = __cpu_to_le16(7),
104 			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
105 		},
106 
107 		{
108 			.ce_id = __cpu_to_le16(8),
109 			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
110 		},
111 
112 		{
113 			.ce_id = __cpu_to_le16(9),
114 			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
115 		},
116 
117 		{
118 			.ce_id = __cpu_to_le16(10),
119 			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
120 		},
121 
122 		{
123 			.ce_id = __cpu_to_le16(11),
124 			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
125 		},
126 };
127 
/* Host-side copy engine attributes, indexed by CE id. src_nentries and
 * src_sz_max describe the host->target (source) ring, dest_nentries
 * the target->host (destination) ring; a pipe with dest_nentries == 0
 * is send-only and vice versa. send_cb/recv_cb are the completion
 * handlers defined below.
 */
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath10k_snoc_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		/* interrupts disabled: this pipe is serviced by polling
		 * (CE_POLL_PIPE) from the rx callbacks
		 */
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htt_tx_cb,
	},

	/* CE5: target->host HTT (ipa_uc->target ) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = 2048,
		.dest_nentries = 2,
	},

	/* CE8: Target to uMC */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9 target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE11: target -> host PKTLOG */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_pktlog_rx_cb,
	},
};
234 
/* Target-side copy engine configuration, sent to the firmware via the
 * QMI wlan enable request (see ath10k_snoc_wlan_enable()). All fields
 * are little-endian on the wire.
 */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(1024),
		.nbytes_max = __cpu_to_le32(64),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(16384),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		/* NOTE(review): raw 4 rather than a PIPEDIR_* constant —
		 * presumably a pipedir value not named in this file;
		 * confirm against the hw pipedir definitions.
		 */
		.pipedir = __cpu_to_le32(4),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 Target to uMC */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target->host HTT */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = __cpu_to_le32(10),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE11 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(11),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},
};
356 
/* Map of HTC service ids to CE pipe numbers (service id, direction,
 * pipe number). Consumed by ath10k_snoc_hif_map_service_to_pipe() and
 * sent to the firmware in the QMI wlan enable request. Terminated by
 * an all-zero entry.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(5),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(9),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(10),
	},
	{ /* in = DL = target -> host pktlog */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(11),
	},
	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
466 
/* Write a 32-bit @value into the SNOC register space at @offset. */
static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_snoc *snoc = ath10k_snoc_priv(ar);

	iowrite32(value, snoc->mem + offset);
}
473 
/* Read a 32-bit value from the SNOC register space at @offset. */
static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_snoc *snoc = ath10k_snoc_priv(ar);

	return ioread32(snoc->mem + offset);
}
483 
/* Allocate one rx skb, DMA-map it and post it to the pipe's CE
 * destination ring.
 *
 * Returns 0 on success, -ENOMEM if allocation fails, -EIO if the DMA
 * mapping fails, or the CE layer's error (e.g. ring full) — in which
 * case the skb is unmapped and freed before returning.
 */
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	/* NOTE(review): assumes the CE needs 4-byte aligned buffers. */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	/* Map the entire unused tail; the completion path unmaps with
	 * the same skb->len + skb_tailroom(skb) length.
	 */
	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* Stash the mapping so the rx completion can unmap it. */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		/* Posting failed: undo the mapping and drop the skb. */
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
522 
/* Refill one rx pipe with as many buffers as its CE destination ring
 * has free slots. -ENOSPC just means the ring is full; any other
 * failure arms the retry timer.
 */
static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	/* buf_sz == 0: this pipe does not receive into host buffers */
	if (pipe->buf_sz == 0)
		return;

	/* send-only pipes have no destination ring */
	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);
	while (num--) {
		ret = __ath10k_snoc_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
			/* NOTE(review): the retry constant is named in ms
			 * but is added to jiffies without
			 * msecs_to_jiffies() — confirm this is intended.
			 */
			mod_timer(&ar_snoc->rx_post_retry, jiffies +
				  ATH10K_SNOC_RX_POST_RETRY_MS);
			break;
		}
	}
}
552 
ath10k_snoc_rx_post(struct ath10k * ar)553 static void ath10k_snoc_rx_post(struct ath10k *ar)
554 {
555 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
556 	int i;
557 
558 	for (i = 0; i < CE_COUNT; i++)
559 		ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
560 }
561 
/* Common rx completion path: drain all completed transfers from
 * @ce_state, unmap each skb, deliver it via @callback, then refill the
 * pipe's rx buffers.
 */
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info =  &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	/* Drain the CE completion queue first, run callbacks after. */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		/* unmap with the same length used in dma_map_single() */
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			/* hardware claims more data than fits: drop it */
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);

		callback(ar, skb);
	}

	ath10k_snoc_rx_post_pipe(pipe_info);
}
602 
/* Rx completion for the plain HTC control pipe (CE2). */
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
607 
/* Rx completion for pipes carrying mixed HTT + HTC traffic. */
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
617 
/* Called by lower (CE) layer when data is received from the Target.
 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
 */
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
625 
/* Deliver an HTT rx skb to the HTT layer, stripping the HTC header
 * first (HTT data on this path bypasses the full HTC demux).
 */
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
631 
/* Rx completion for the dedicated HTT pipe (CE5); also polls CE4
 * since it runs with interrupts disabled.
 */
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}
637 
/* Timer callback armed by ath10k_snoc_rx_post_pipe() when an rx buffer
 * post failed; retries replenishing all pipes.
 */
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}
645 
ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe * ce_state)646 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
647 {
648 	struct ath10k *ar = ce_state->ar;
649 	struct sk_buff_head list;
650 	struct sk_buff *skb;
651 
652 	__skb_queue_head_init(&list);
653 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
654 		if (!skb)
655 			continue;
656 
657 		__skb_queue_tail(&list, skb);
658 	}
659 
660 	while ((skb = __skb_dequeue(&list)))
661 		ath10k_htc_tx_completion_handler(ar, skb);
662 }
663 
/* Tx completion for the HTT pipe (CE4): unmap each completed skb and
 * hand it back to the HTT layer.
 */
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}
678 
/* Send a scatter-gather list of @n_items buffers on @pipe_id. All but
 * the last item are posted with CE_SEND_FLAG_GATHER; the final send
 * (flags == 0) completes the transfer. On failure, every descriptor
 * already posted in this call is reverted.
 *
 * Returns 0 on success or the CE layer's error code.
 */
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	spin_lock_bh(&ce->ce_lock);

	/* post all items except the last with the GATHER flag set */
	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* the final item (flags == 0) kicks off the actual transfer */
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	/* roll back the i descriptors posted before the failure */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}
731 
/* Report static target information; WCN3990 has no BMI phase, so both
 * version and type are fixed to the hw revision. Always returns 0.
 */
static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
					   struct bmi_target_info *target_info)
{
	target_info->version = ATH10K_HW_WCN3990;
	target_info->type = ATH10K_HW_WCN3990;

	return 0;
}
740 
/* Return the number of free source (tx) entries on @pipe's CE ring. */
static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}
749 
/* Reap completed sends on @pipe. Unless @force is set, servicing is
 * skipped while more than half of the pipe's source entries are still
 * free, to avoid checking on every call.
 */
static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		/* more than half the ring is free: nothing urgent to reap */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
765 
/* Look up the uplink and downlink CE pipe numbers for @service_id in
 * target_service_to_ce_map_wlan. Both directions must be found.
 *
 * Returns 0 on success or -ENOENT if either direction is missing.
 */
static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	bool found_ul = false;
	bool found_dl = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		const struct service_to_pipe *svc =
					&target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(svc->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(svc->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			/* duplicate downlink entries indicate a bad table */
			WARN_ON(found_dl);
			*dl_pipe = __le32_to_cpu(svc->pipenum);
			found_dl = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(found_ul);
			*ul_pipe = __le32_to_cpu(svc->pipenum);
			found_ul = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(found_dl);
			WARN_ON(found_ul);
			*dl_pipe = __le32_to_cpu(svc->pipenum);
			*ul_pipe = __le32_to_cpu(svc->pipenum);
			found_dl = true;
			found_ul = true;
			break;
		}
	}

	if (!found_ul || !found_dl)
		return -ENOENT;

	return 0;
}
811 
/* Resolve the default (control) pipes via the RSVD_CTRL service. The
 * return value is ignored because RSVD_CTRL has both directions in
 * target_service_to_ce_map_wlan, so the lookup cannot fail.
 */
static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
821 
/* Mask all copy engine interrupts. */
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}
826 
/* Unmask all copy engine interrupts. */
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}
831 
ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe * snoc_pipe)832 static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
833 {
834 	struct ath10k_ce_pipe *ce_pipe;
835 	struct ath10k_ce_ring *ce_ring;
836 	struct sk_buff *skb;
837 	struct ath10k *ar;
838 	int i;
839 
840 	ar = snoc_pipe->hif_ce_state;
841 	ce_pipe = snoc_pipe->ce_hdl;
842 	ce_ring = ce_pipe->dest_ring;
843 
844 	if (!ce_ring)
845 		return;
846 
847 	if (!snoc_pipe->buf_sz)
848 		return;
849 
850 	for (i = 0; i < ce_ring->nentries; i++) {
851 		skb = ce_ring->per_transfer_context[i];
852 		if (!skb)
853 			continue;
854 
855 		ce_ring->per_transfer_context[i] = NULL;
856 
857 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
858 				 skb->len + skb_tailroom(skb),
859 				 DMA_FROM_DEVICE);
860 		dev_kfree_skb_any(skb);
861 	}
862 }
863 
ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe * snoc_pipe)864 static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
865 {
866 	struct ath10k_ce_pipe *ce_pipe;
867 	struct ath10k_ce_ring *ce_ring;
868 	struct sk_buff *skb;
869 	struct ath10k *ar;
870 	int i;
871 
872 	ar = snoc_pipe->hif_ce_state;
873 	ce_pipe = snoc_pipe->ce_hdl;
874 	ce_ring = ce_pipe->src_ring;
875 
876 	if (!ce_ring)
877 		return;
878 
879 	if (!snoc_pipe->buf_sz)
880 		return;
881 
882 	for (i = 0; i < ce_ring->nentries; i++) {
883 		skb = ce_ring->per_transfer_context[i];
884 		if (!skb)
885 			continue;
886 
887 		ce_ring->per_transfer_context[i] = NULL;
888 
889 		ath10k_htc_tx_completion_handler(ar, skb);
890 	}
891 }
892 
ath10k_snoc_buffer_cleanup(struct ath10k * ar)893 static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
894 {
895 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
896 	struct ath10k_snoc_pipe *pipe_info;
897 	int pipe_num;
898 
899 	del_timer_sync(&ar_snoc->rx_post_retry);
900 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
901 		pipe_info = &ar_snoc->pipe_info[pipe_num];
902 		ath10k_snoc_rx_pipe_cleanup(pipe_info);
903 		ath10k_snoc_tx_pipe_cleanup(pipe_info);
904 	}
905 }
906 
/* Stop the HIF layer: mask CE interrupts (kept enabled while a crash
 * dump is being flushed), quiesce and disable NAPI, then free all
 * buffers still held by the CE rings.
 */
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		ath10k_snoc_irq_disable(ar);

	/* NOTE(review): assumes NAPI was enabled by a prior hif_start —
	 * confirm hif_stop cannot be reached without it.
	 */
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	ath10k_snoc_buffer_cleanup(ar);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
917 
/* Start the HIF layer: enable NAPI and CE interrupts, prime all rx
 * rings with buffers, and clear the recovery flag now that the device
 * is back up. Always returns 0.
 */
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	napi_enable(&ar->napi);
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}
932 
ath10k_snoc_init_pipes(struct ath10k * ar)933 static int ath10k_snoc_init_pipes(struct ath10k *ar)
934 {
935 	int i, ret;
936 
937 	for (i = 0; i < CE_COUNT; i++) {
938 		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
939 		if (ret) {
940 			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
941 				   i, ret);
942 			return ret;
943 		}
944 	}
945 
946 	return 0;
947 }
948 
ath10k_snoc_wlan_enable(struct ath10k * ar,enum ath10k_firmware_mode fw_mode)949 static int ath10k_snoc_wlan_enable(struct ath10k *ar,
950 				   enum ath10k_firmware_mode fw_mode)
951 {
952 	struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
953 	struct ath10k_qmi_wlan_enable_cfg cfg;
954 	enum wlfw_driver_mode_enum_v01 mode;
955 	int pipe_num;
956 
957 	for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
958 		tgt_cfg[pipe_num].pipe_num =
959 				target_ce_config_wlan[pipe_num].pipenum;
960 		tgt_cfg[pipe_num].pipe_dir =
961 				target_ce_config_wlan[pipe_num].pipedir;
962 		tgt_cfg[pipe_num].nentries =
963 				target_ce_config_wlan[pipe_num].nentries;
964 		tgt_cfg[pipe_num].nbytes_max =
965 				target_ce_config_wlan[pipe_num].nbytes_max;
966 		tgt_cfg[pipe_num].flags =
967 				target_ce_config_wlan[pipe_num].flags;
968 		tgt_cfg[pipe_num].reserved = 0;
969 	}
970 
971 	cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
972 				sizeof(struct ath10k_tgt_pipe_cfg);
973 	cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
974 		&tgt_cfg;
975 	cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
976 				  sizeof(struct ath10k_svc_pipe_cfg);
977 	cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
978 		&target_service_to_ce_map_wlan;
979 	cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
980 					sizeof(struct ath10k_shadow_reg_cfg);
981 	cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
982 		&target_shadow_reg_cfg_map;
983 
984 	switch (fw_mode) {
985 	case ATH10K_FIRMWARE_MODE_NORMAL:
986 		mode = QMI_WLFW_MISSION_V01;
987 		break;
988 	case ATH10K_FIRMWARE_MODE_UTF:
989 		mode = QMI_WLFW_FTM_V01;
990 		break;
991 	default:
992 		ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
993 		return -EINVAL;
994 	}
995 
996 	return ath10k_qmi_wlan_enable(ar, &cfg, mode,
997 				       NULL);
998 }
999 
/* Send the QMI wlan disable request. It is skipped only when BOTH
 * ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY are set, i.e.
 * during a firmware-crash recovery. If either flag is clear — e.g. the
 * restart was injected via debugfs — the firmware must be stopped
 * explicitly so it can be restarted during the driver restart
 * sequence.
 */
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
	    !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		ath10k_qmi_wlan_disable(ar);
}
1014 
/* Power down the target: send the QMI wlan disable (subject to the
 * recovery-flag check) and free the ring read index (RRI) area.
 */
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_snoc_wlan_disable(ar);
	ath10k_ce_free_rri(ar);
}
1022 
/* Power up the target: enable WLAN via QMI, allocate the ring read
 * index (RRI) area, and initialize all host copy engine pipes.
 *
 * Returns 0 on success or a negative errno; on CE init failure the
 * QMI wlan enable is rolled back.
 */
static int ath10k_snoc_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_snoc_wlan_enable(ar, fw_mode);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		return ret;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_wlan_enable;
	}

	return 0;

err_wlan_enable:
	/* NOTE(review): the RRI allocation is not undone here; it is
	 * freed by hif_power_down — confirm power_down always runs
	 * after a failed power_up.
	 */
	ath10k_snoc_wlan_disable(ar);

	return ret;
}
1052 
/* Translate the generic @fw_log_mode into the WCN3990 QMI firmware
 * debug mode: non-zero selects logging over the copy engine, zero
 * selects the diag interface. Returns the QMI call's status.
 */
static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
					       u8 fw_log_mode)
{
	u8 fw_dbg_mode;

	if (fw_log_mode)
		fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
	else
		fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;

	return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
}
1065 
1066 #ifdef CONFIG_PM
ath10k_snoc_hif_suspend(struct ath10k * ar)1067 static int ath10k_snoc_hif_suspend(struct ath10k *ar)
1068 {
1069 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1070 	int ret;
1071 
1072 	if (!device_may_wakeup(ar->dev))
1073 		return -EPERM;
1074 
1075 	ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
1076 	if (ret) {
1077 		ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret);
1078 		return ret;
1079 	}
1080 
1081 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");
1082 
1083 	return ret;
1084 }
1085 
ath10k_snoc_hif_resume(struct ath10k * ar)1086 static int ath10k_snoc_hif_resume(struct ath10k *ar)
1087 {
1088 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1089 	int ret;
1090 
1091 	if (!device_may_wakeup(ar->dev))
1092 		return -EPERM;
1093 
1094 	ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
1095 	if (ret) {
1096 		ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
1097 		return ret;
1098 	}
1099 
1100 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");
1101 
1102 	return ret;
1103 }
1104 #endif
1105 
/* HIF operation table handed to ath10k_core_create() in probe; the
 * core layer drives all SNOC bus access through these callbacks.
 */
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
	.start		= ath10k_snoc_hif_start,
	.stop		= ath10k_snoc_hif_stop,
	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
	.power_up		= ath10k_snoc_hif_power_up,
	.power_down		= ath10k_snoc_hif_power_down,
	.tx_sg			= ath10k_snoc_hif_tx_sg,
	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
	.get_target_info	= ath10k_snoc_hif_get_target_info,
	.set_target_log_mode    = ath10k_snoc_hif_set_target_log_mode,

#ifdef CONFIG_PM
	.suspend                = ath10k_snoc_hif_suspend,
	.resume                 = ath10k_snoc_hif_resume,
#endif
};
1126 
/* Register accessors used by the shared copy engine code (ar->ce_priv,
 * wired up in ath10k_snoc_probe()).
 */
static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
};
1131 
ath10k_snoc_get_ce_id_from_irq(struct ath10k * ar,int irq)1132 static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
1133 {
1134 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1135 	int i;
1136 
1137 	for (i = 0; i < CE_COUNT_MAX; i++) {
1138 		if (ar_snoc->ce_irqs[i].irq_line == irq)
1139 			return i;
1140 	}
1141 	ath10k_err(ar, "No matching CE id for irq %d\n", irq);
1142 
1143 	return -EINVAL;
1144 }
1145 
ath10k_snoc_per_engine_handler(int irq,void * arg)1146 static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
1147 {
1148 	struct ath10k *ar = arg;
1149 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1150 	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
1151 
1152 	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
1153 		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
1154 			    ce_id);
1155 		return IRQ_HANDLED;
1156 	}
1157 
1158 	ath10k_snoc_irq_disable(ar);
1159 	napi_schedule(&ar->napi);
1160 
1161 	return IRQ_HANDLED;
1162 }
1163 
ath10k_snoc_napi_poll(struct napi_struct * ctx,int budget)1164 static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
1165 {
1166 	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
1167 	int done = 0;
1168 
1169 	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
1170 		napi_complete(ctx);
1171 		return done;
1172 	}
1173 
1174 	ath10k_ce_per_engine_service_any(ar);
1175 	done = ath10k_htt_txrx_compl_task(ar, budget);
1176 
1177 	if (done < budget) {
1178 		napi_complete(ctx);
1179 		ath10k_snoc_irq_enable(ar);
1180 	}
1181 
1182 	return done;
1183 }
1184 
/* Attach the NAPI poll handler to ar->napi with the driver-wide
 * ATH10K_NAPI_BUDGET weight.
 */
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
		       ATH10K_NAPI_BUDGET);
}
1190 
ath10k_snoc_request_irq(struct ath10k * ar)1191 static int ath10k_snoc_request_irq(struct ath10k *ar)
1192 {
1193 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1194 	int irqflags = IRQF_TRIGGER_RISING;
1195 	int ret, id;
1196 
1197 	for (id = 0; id < CE_COUNT_MAX; id++) {
1198 		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
1199 				  ath10k_snoc_per_engine_handler,
1200 				  irqflags, ce_name[id], ar);
1201 		if (ret) {
1202 			ath10k_err(ar,
1203 				   "failed to register IRQ handler for CE %d: %d",
1204 				   id, ret);
1205 			goto err_irq;
1206 		}
1207 	}
1208 
1209 	return 0;
1210 
1211 err_irq:
1212 	for (id -= 1; id >= 0; id--)
1213 		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1214 
1215 	return ret;
1216 }
1217 
ath10k_snoc_free_irq(struct ath10k * ar)1218 static void ath10k_snoc_free_irq(struct ath10k *ar)
1219 {
1220 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1221 	int id;
1222 
1223 	for (id = 0; id < CE_COUNT_MAX; id++)
1224 		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1225 }
1226 
ath10k_snoc_resource_init(struct ath10k * ar)1227 static int ath10k_snoc_resource_init(struct ath10k *ar)
1228 {
1229 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1230 	struct platform_device *pdev;
1231 	struct resource *res;
1232 	int i, ret = 0;
1233 
1234 	pdev = ar_snoc->dev;
1235 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
1236 	if (!res) {
1237 		ath10k_err(ar, "Memory base not found in DT\n");
1238 		return -EINVAL;
1239 	}
1240 
1241 	ar_snoc->mem_pa = res->start;
1242 	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
1243 				    resource_size(res));
1244 	if (!ar_snoc->mem) {
1245 		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
1246 			   &ar_snoc->mem_pa);
1247 		return -EINVAL;
1248 	}
1249 
1250 	for (i = 0; i < CE_COUNT; i++) {
1251 		res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
1252 		if (!res) {
1253 			ath10k_err(ar, "failed to get IRQ%d\n", i);
1254 			ret = -ENODEV;
1255 			goto out;
1256 		}
1257 		ar_snoc->ce_irqs[i].irq_line = res->start;
1258 	}
1259 
1260 out:
1261 	return ret;
1262 }
1263 
/* Handle firmware state indications from the QMI layer.
 *
 * FW_READY: the first indication registers the driver core; later ones
 * (device already registered) schedule a restart instead. FW_DOWN:
 * marks recovery in progress and sets the crash-flush flag so the rest
 * of the driver stops servicing the device (see ath10k_snoc_napi_poll()
 * and ath10k_snoc_wlan_disable()).
 *
 * Returns 0 on success (or when the device is being unregistered and
 * the event is ignored), a negative errno on registration failure or
 * for an unknown indication type.
 */
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_bus_params bus_params = {};
	int ret;

	/* Ignore late indications once teardown has started in remove(). */
	if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
		return 0;

	switch (type) {
	case ATH10K_QMI_EVENT_FW_READY_IND:
		if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
			/* Firmware came back after going down: restart the
			 * core rather than registering a second time.
			 */
			queue_work(ar->workqueue, &ar->restart_work);
			break;
		}

		bus_params.dev_type = ATH10K_DEV_TYPE_LL;
		bus_params.chip_id = ar_snoc->target_info.soc_version;
		ret = ath10k_core_register(ar, &bus_params);
		if (ret) {
			ath10k_err(ar, "Failed to register driver core: %d\n",
				   ret);
			return ret;
		}
		set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
		break;
	case ATH10K_QMI_EVENT_FW_DOWN_IND:
		set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
		set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
		break;
	default:
		ath10k_err(ar, "invalid fw indication: %llx\n", type);
		return -EINVAL;
	}

	return 0;
}
1301 
ath10k_snoc_setup_resource(struct ath10k * ar)1302 static int ath10k_snoc_setup_resource(struct ath10k *ar)
1303 {
1304 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1305 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1306 	struct ath10k_snoc_pipe *pipe;
1307 	int i, ret;
1308 
1309 	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
1310 	spin_lock_init(&ce->ce_lock);
1311 	for (i = 0; i < CE_COUNT; i++) {
1312 		pipe = &ar_snoc->pipe_info[i];
1313 		pipe->ce_hdl = &ce->ce_states[i];
1314 		pipe->pipe_num = i;
1315 		pipe->hif_ce_state = ar;
1316 
1317 		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1318 		if (ret) {
1319 			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1320 				   i, ret);
1321 			return ret;
1322 		}
1323 
1324 		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
1325 	}
1326 	ath10k_snoc_init_napi(ar);
1327 
1328 	return 0;
1329 }
1330 
ath10k_snoc_release_resource(struct ath10k * ar)1331 static void ath10k_snoc_release_resource(struct ath10k *ar)
1332 {
1333 	int i;
1334 
1335 	netif_napi_del(&ar->napi);
1336 	for (i = 0; i < CE_COUNT; i++)
1337 		ath10k_ce_free_pipe(ar, i);
1338 }
1339 
ath10k_get_vreg_info(struct ath10k * ar,struct device * dev,struct ath10k_vreg_info * vreg_info)1340 static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
1341 				struct ath10k_vreg_info *vreg_info)
1342 {
1343 	struct regulator *reg;
1344 	int ret = 0;
1345 
1346 	reg = devm_regulator_get_optional(dev, vreg_info->name);
1347 
1348 	if (IS_ERR(reg)) {
1349 		ret = PTR_ERR(reg);
1350 
1351 		if (ret  == -EPROBE_DEFER) {
1352 			ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
1353 				   vreg_info->name);
1354 			return ret;
1355 		}
1356 		if (vreg_info->required) {
1357 			ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
1358 				   vreg_info->name, ret);
1359 			return ret;
1360 		}
1361 		ath10k_dbg(ar, ATH10K_DBG_SNOC,
1362 			   "Optional regulator %s doesn't exist: %d\n",
1363 			   vreg_info->name, ret);
1364 		goto done;
1365 	}
1366 
1367 	vreg_info->reg = reg;
1368 
1369 done:
1370 	ath10k_dbg(ar, ATH10K_DBG_SNOC,
1371 		   "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
1372 		   vreg_info->name, vreg_info->min_v, vreg_info->max_v,
1373 		   vreg_info->load_ua, vreg_info->settle_delay);
1374 
1375 	return 0;
1376 }
1377 
ath10k_get_clk_info(struct ath10k * ar,struct device * dev,struct ath10k_clk_info * clk_info)1378 static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
1379 			       struct ath10k_clk_info *clk_info)
1380 {
1381 	struct clk *handle;
1382 	int ret = 0;
1383 
1384 	handle = devm_clk_get(dev, clk_info->name);
1385 	if (IS_ERR(handle)) {
1386 		ret = PTR_ERR(handle);
1387 		if (clk_info->required) {
1388 			ath10k_err(ar, "snoc clock %s isn't available: %d\n",
1389 				   clk_info->name, ret);
1390 			return ret;
1391 		}
1392 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
1393 			   clk_info->name,
1394 			   ret);
1395 		return 0;
1396 	}
1397 
1398 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
1399 		   clk_info->name, clk_info->freq);
1400 
1401 	clk_info->handle = handle;
1402 
1403 	return ret;
1404 }
1405 
/* Power on a single regulator: program its voltage window, apply the
 * optional load request, enable it, then busy-wait for the configured
 * settle time. On failure the already-performed steps are unwound via
 * the goto chain so the supply is left unconfigured.
 */
static int __ath10k_snoc_vreg_on(struct ath10k *ar,
				 struct ath10k_vreg_info *vreg_info)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
		   vreg_info->name);

	ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
				    vreg_info->max_v);
	if (ret) {
		ath10k_err(ar,
			   "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
			   vreg_info->name, vreg_info->min_v, vreg_info->max_v);
		return ret;
	}

	/* load_ua == 0 means no load request was configured for this rail */
	if (vreg_info->load_ua) {
		ret = regulator_set_load(vreg_info->reg, vreg_info->load_ua);
		if (ret < 0) {
			ath10k_err(ar, "failed to set regulator %s load: %d\n",
				   vreg_info->name, vreg_info->load_ua);
			goto err_set_load;
		}
	}

	ret = regulator_enable(vreg_info->reg);
	if (ret) {
		ath10k_err(ar, "failed to enable regulator %s\n",
			   vreg_info->name);
		goto err_enable;
	}

	/* settle_delay is in microseconds (udelay) */
	if (vreg_info->settle_delay)
		udelay(vreg_info->settle_delay);

	return 0;

err_enable:
	regulator_set_load(vreg_info->reg, 0);
err_set_load:
	regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);

	return ret;
}
1451 
/* Power off a single regulator. All three teardown steps (disable,
 * drop the load request, drop the voltage request) are attempted even
 * if an earlier one fails; intermediate failures are only logged, and
 * the return value is that of the LAST step (regulator_set_voltage).
 */
static int __ath10k_snoc_vreg_off(struct ath10k *ar,
				  struct ath10k_vreg_info *vreg_info)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
		   vreg_info->name);

	ret = regulator_disable(vreg_info->reg);
	if (ret)
		ath10k_err(ar, "failed to disable regulator %s\n",
			   vreg_info->name);

	ret = regulator_set_load(vreg_info->reg, 0);
	if (ret < 0)
		ath10k_err(ar, "failed to set load %s\n", vreg_info->name);

	ret = regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
	if (ret)
		ath10k_err(ar, "failed to set voltage %s\n", vreg_info->name);

	return ret;
}
1475 
ath10k_snoc_vreg_on(struct ath10k * ar)1476 static int ath10k_snoc_vreg_on(struct ath10k *ar)
1477 {
1478 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1479 	struct ath10k_vreg_info *vreg_info;
1480 	int ret = 0;
1481 	int i;
1482 
1483 	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1484 		vreg_info = &ar_snoc->vreg[i];
1485 
1486 		if (!vreg_info->reg)
1487 			continue;
1488 
1489 		ret = __ath10k_snoc_vreg_on(ar, vreg_info);
1490 		if (ret)
1491 			goto err_reg_config;
1492 	}
1493 
1494 	return 0;
1495 
1496 err_reg_config:
1497 	for (i = i - 1; i >= 0; i--) {
1498 		vreg_info = &ar_snoc->vreg[i];
1499 
1500 		if (!vreg_info->reg)
1501 			continue;
1502 
1503 		__ath10k_snoc_vreg_off(ar, vreg_info);
1504 	}
1505 
1506 	return ret;
1507 }
1508 
ath10k_snoc_vreg_off(struct ath10k * ar)1509 static int ath10k_snoc_vreg_off(struct ath10k *ar)
1510 {
1511 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1512 	struct ath10k_vreg_info *vreg_info;
1513 	int ret = 0;
1514 	int i;
1515 
1516 	for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
1517 		vreg_info = &ar_snoc->vreg[i];
1518 
1519 		if (!vreg_info->reg)
1520 			continue;
1521 
1522 		ret = __ath10k_snoc_vreg_off(ar, vreg_info);
1523 	}
1524 
1525 	return ret;
1526 }
1527 
ath10k_snoc_clk_init(struct ath10k * ar)1528 static int ath10k_snoc_clk_init(struct ath10k *ar)
1529 {
1530 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1531 	struct ath10k_clk_info *clk_info;
1532 	int ret = 0;
1533 	int i;
1534 
1535 	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1536 		clk_info = &ar_snoc->clk[i];
1537 
1538 		if (!clk_info->handle)
1539 			continue;
1540 
1541 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
1542 			   clk_info->name);
1543 
1544 		if (clk_info->freq) {
1545 			ret = clk_set_rate(clk_info->handle, clk_info->freq);
1546 
1547 			if (ret) {
1548 				ath10k_err(ar, "failed to set clock %s freq %u\n",
1549 					   clk_info->name, clk_info->freq);
1550 				goto err_clock_config;
1551 			}
1552 		}
1553 
1554 		ret = clk_prepare_enable(clk_info->handle);
1555 		if (ret) {
1556 			ath10k_err(ar, "failed to enable clock %s\n",
1557 				   clk_info->name);
1558 			goto err_clock_config;
1559 		}
1560 	}
1561 
1562 	return 0;
1563 
1564 err_clock_config:
1565 	for (i = i - 1; i >= 0; i--) {
1566 		clk_info = &ar_snoc->clk[i];
1567 
1568 		if (!clk_info->handle)
1569 			continue;
1570 
1571 		clk_disable_unprepare(clk_info->handle);
1572 	}
1573 
1574 	return ret;
1575 }
1576 
ath10k_snoc_clk_deinit(struct ath10k * ar)1577 static int ath10k_snoc_clk_deinit(struct ath10k *ar)
1578 {
1579 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1580 	struct ath10k_clk_info *clk_info;
1581 	int i;
1582 
1583 	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1584 		clk_info = &ar_snoc->clk[i];
1585 
1586 		if (!clk_info->handle)
1587 			continue;
1588 
1589 		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
1590 			   clk_info->name);
1591 
1592 		clk_disable_unprepare(clk_info->handle);
1593 	}
1594 
1595 	return 0;
1596 }
1597 
ath10k_hw_power_on(struct ath10k * ar)1598 static int ath10k_hw_power_on(struct ath10k *ar)
1599 {
1600 	int ret;
1601 
1602 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1603 
1604 	ret = ath10k_snoc_vreg_on(ar);
1605 	if (ret)
1606 		return ret;
1607 
1608 	ret = ath10k_snoc_clk_init(ar);
1609 	if (ret)
1610 		goto vreg_off;
1611 
1612 	return ret;
1613 
1614 vreg_off:
1615 	ath10k_snoc_vreg_off(ar);
1616 	return ret;
1617 }
1618 
ath10k_hw_power_off(struct ath10k * ar)1619 static int ath10k_hw_power_off(struct ath10k *ar)
1620 {
1621 	int ret;
1622 
1623 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1624 
1625 	ath10k_snoc_clk_deinit(ar);
1626 
1627 	ret = ath10k_snoc_vreg_off(ar);
1628 
1629 	return ret;
1630 }
1631 
/* Device-tree match table; .data carries the per-chip constants
 * (hw_rev, DMA mask, MSA size) consumed in ath10k_snoc_probe().
 */
static const struct of_device_id ath10k_snoc_dt_match[] = {
	{ .compatible = "qcom,wcn3990-wifi",
	 .data = &drv_priv,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1639 
ath10k_snoc_probe(struct platform_device * pdev)1640 static int ath10k_snoc_probe(struct platform_device *pdev)
1641 {
1642 	const struct ath10k_snoc_drv_priv *drv_data;
1643 	const struct of_device_id *of_id;
1644 	struct ath10k_snoc *ar_snoc;
1645 	struct device *dev;
1646 	struct ath10k *ar;
1647 	u32 msa_size;
1648 	int ret;
1649 	u32 i;
1650 
1651 	of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
1652 	if (!of_id) {
1653 		dev_err(&pdev->dev, "failed to find matching device tree id\n");
1654 		return -EINVAL;
1655 	}
1656 
1657 	drv_data = of_id->data;
1658 	dev = &pdev->dev;
1659 
1660 	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
1661 	if (ret) {
1662 		dev_err(dev, "failed to set dma mask: %d", ret);
1663 		return ret;
1664 	}
1665 
1666 	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
1667 				drv_data->hw_rev, &ath10k_snoc_hif_ops);
1668 	if (!ar) {
1669 		dev_err(dev, "failed to allocate core\n");
1670 		return -ENOMEM;
1671 	}
1672 
1673 	ar_snoc = ath10k_snoc_priv(ar);
1674 	ar_snoc->dev = pdev;
1675 	platform_set_drvdata(pdev, ar);
1676 	ar_snoc->ar = ar;
1677 	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1678 	ar->ce_priv = &ar_snoc->ce;
1679 	msa_size = drv_data->msa_size;
1680 
1681 	ret = ath10k_snoc_resource_init(ar);
1682 	if (ret) {
1683 		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1684 		goto err_core_destroy;
1685 	}
1686 
1687 	ret = ath10k_snoc_setup_resource(ar);
1688 	if (ret) {
1689 		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
1690 		goto err_core_destroy;
1691 	}
1692 	ret = ath10k_snoc_request_irq(ar);
1693 	if (ret) {
1694 		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
1695 		goto err_release_resource;
1696 	}
1697 
1698 	ar_snoc->vreg = vreg_cfg;
1699 	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1700 		ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
1701 		if (ret)
1702 			goto err_free_irq;
1703 	}
1704 
1705 	ar_snoc->clk = clk_cfg;
1706 	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1707 		ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
1708 		if (ret)
1709 			goto err_free_irq;
1710 	}
1711 
1712 	ret = ath10k_hw_power_on(ar);
1713 	if (ret) {
1714 		ath10k_err(ar, "failed to power on device: %d\n", ret);
1715 		goto err_free_irq;
1716 	}
1717 
1718 	ret = ath10k_qmi_init(ar, msa_size);
1719 	if (ret) {
1720 		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
1721 		goto err_core_destroy;
1722 	}
1723 
1724 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
1725 
1726 	return 0;
1727 
1728 err_free_irq:
1729 	ath10k_snoc_free_irq(ar);
1730 
1731 err_release_resource:
1732 	ath10k_snoc_release_resource(ar);
1733 
1734 err_core_destroy:
1735 	ath10k_core_destroy(ar);
1736 
1737 	return ret;
1738 }
1739 
/* Platform-driver remove: give an in-flight firmware recovery up to 3
 * seconds to complete, block further QMI indications, then tear down in
 * reverse probe order.
 */
static int ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");

	reinit_completion(&ar->driver_recovery);

	if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);

	/* makes ath10k_snoc_fw_indication() ignore late QMI events */
	set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);

	ath10k_core_unregister(ar);
	ath10k_hw_power_off(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_qmi_deinit(ar);
	ath10k_core_destroy(ar);

	return 0;
}
1763 
/* Platform-driver glue: binds against the OF match table above. */
static struct platform_driver ath10k_snoc_driver = {
	.probe  = ath10k_snoc_probe,
	.remove = ath10k_snoc_remove,
	.driver = {
		.name   = "ath10k_snoc",
		.of_match_table = ath10k_snoc_dt_match,
	},
};
module_platform_driver(ath10k_snoc_driver);

MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
1777