/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"

/********/
/* Send */
/********/

static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}

static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
	if (!skb)
		return NULL;

	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	skb_cb = ATH10K_SKB_CB(skb);
	memset(skb_cb, 0, sizeof(*skb_cb));

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
	return skb;
}

static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

	dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}

void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
				     struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
		   ep->eid, skb);

	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);

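/* Fill in the HTC header that ath10k_htc_send() pushed in front of the
 * frame: endpoint id, payload length (excluding the header itself) and a
 * per-endpoint sequence number taken under tx_lock.  Note that the
 * NEED_CREDIT_UPDATE flag is set unconditionally here.
 */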
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;

	hdr->eid = ep->eid;
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
	hdr->flags = 0;
	hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;
	spin_unlock_bh(&ep->htc->tx_lock);
}

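/* Transmit a frame on the given endpoint.  When TX credit flow control is
 * enabled the frame consumes DIV_ROUND_UP(skb->len, target_credit_size)
 * credits, which are refunded if the HIF submission fails.  The HTC header
 * is pushed here and popped again on completion (or on the error path).
 */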
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int credits = 0;
	int ret;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	if (ep->tx_credit_flow_enabled) {
		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
		spin_lock_bh(&htc->tx_lock);
		if (ep->tx_credits < credits) {
			ath10k_dbg(ar, ATH10K_DBG_HTC,
				   "htc insufficient credits ep %d required %d available %d\n",
				   eid, credits, ep->tx_credits);
			spin_unlock_bh(&htc->tx_lock);
			ret = -EAGAIN;
			goto err_pull;
		}
		ep->tx_credits -= credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d consumed %d credits (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);
	}

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, skb_cb->paddr);
	if (ret) {
		ret = -EIO;
		goto err_credits;
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	if (ep->tx_credit_flow_enabled) {
		spin_lock_bh(&htc->tx_lock);
		ep->tx_credits += credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d reverted %d credits back (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);

		if (ep->ep_ops.ep_tx_credits)
			ep->ep_ops.ep_tx_credits(htc->ar);
	}
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}

void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_skb_cb *skb_cb;
	struct ath10k_htc_ep *ep;

	if (WARN_ON_ONCE(!skb))
		return;

	skb_cb = ATH10K_SKB_CB(skb);
	ep = &htc->endpoint[skb_cb->eid];

	ath10k_htc_notify_tx_completion(ep, skb);
	/* the skb now belongs to the completion handler */
}
EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);

/***********/
/* Receive */
/***********/

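/* A credit report returns TX credits from the target to one or more
 * endpoints.  Credits are added back under tx_lock; the optional
 * ep_tx_credits() callback is invoked with the lock dropped so that the
 * upper layer may immediately queue more frames.
 */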
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d\n", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

		if (ep->ep_ops.ep_tx_credits) {
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}

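/* A lookahead report carries a copy of the first 4 bytes of the next HTC
 * message still pending on the target, so that bus drivers which must size
 * the next read up front (such as the SDIO HIF) can fetch it efficiently.
 * pre_valid and post_valid must be one's complements of each other for the
 * lookahead to be considered valid.
 */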
static int
ath10k_htc_process_lookahead(struct ath10k_htc *htc,
			     const struct ath10k_htc_lookahead_report *report,
			     int len,
			     enum ath10k_htc_ep_id eid,
			     void *next_lookaheads,
			     int *next_lookaheads_len)
{
	struct ath10k *ar = htc->ar;

	/* Invalid lookahead flags are actually transmitted by
	 * the target in the HTC control message.
	 * Since this will happen at every boot we silently ignore
	 * the lookahead in this case
	 */
	if (report->pre_valid != ((~report->post_valid) & 0xFF))
		return 0;

	if (next_lookaheads && next_lookaheads_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
			   report->pre_valid, report->post_valid);

		/* look ahead bytes are valid, copy them over */
		memcpy((u8 *)next_lookaheads, report->lookahead, 4);

		*next_lookaheads_len = 1;
	}

	return 0;
}

static int
ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
				    const struct ath10k_htc_lookahead_bundle *report,
				    int len,
				    enum ath10k_htc_ep_id eid,
				    void *next_lookaheads,
				    int *next_lookaheads_len)
{
	struct ath10k *ar = htc->ar;
	int bundle_cnt = len / sizeof(*report);

	if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
		ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
			    bundle_cnt);
		return -EINVAL;
	}

	if (next_lookaheads && next_lookaheads_len) {
		int i;

		for (i = 0; i < bundle_cnt; i++) {
			memcpy(((u8 *)next_lookaheads) + 4 * i,
			       report->lookahead, 4);
			report++;
		}

		*next_lookaheads_len = bundle_cnt;
	}

	return 0;
}

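/* An RX trailer is a sequence of records appended to an HTC frame.  Each
 * record starts with a small header (id + length) followed by the record
 * payload; credit, lookahead and lookahead-bundle records are handled here,
 * anything else is logged and skipped.
 */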
int ath10k_htc_process_trailer(struct ath10k_htc *htc,
			       u8 *buffer,
			       int length,
			       enum ath10k_htc_ep_id src_eid,
			       void *next_lookaheads,
			       int *next_lookaheads_len)
{
	struct ath10k_htc_lookahead_bundle *bundle;
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report too short\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD:
			len = sizeof(struct ath10k_htc_lookahead_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Lookahead report too short\n");
				status = -EINVAL;
				break;
			}
			status = ath10k_htc_process_lookahead(htc,
							      record->lookahead_report,
							      record->hdr.len,
							      src_eid,
							      next_lookaheads,
							      next_lookaheads_len);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
			bundle = record->lookahead_bundle;
			status = ath10k_htc_process_lookahead_bundle(htc,
								     bundle,
								     record->hdr.len,
								     src_eid,
								     next_lookaheads,
								     next_lookaheads_len);
			break;
		default:
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}
EXPORT_SYMBOL(ath10k_htc_process_trailer);

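/* Entry point for completed RX buffers from the HIF layer.  The HTC header
 * is stripped and validated, any trailer records are processed and trimmed
 * off, and the remaining payload is handed to the endpoint's RX completion
 * callback, which takes ownership of the skb.
 */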
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid,
						    NULL, NULL);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);

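/* RX handler for the pseudo control endpoint (EP 0).  Ready and connect
 * service responses are copied into control_resp_buffer and signalled via
 * ctl_resp so that ath10k_htc_wait_target()/ath10k_htc_connect_service()
 * can pick them up; suspend-complete indications are forwarded to the
 * registered htc_ops callback.
 */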
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

	switch (__le16_to_cpu(msg->hdr.message_id)) {
	case ATH10K_HTC_MSG_READY_ID:
	case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
		/* handle HTC control message */
		if (completion_done(&htc->ctl_resp)) {
			/* this is a fatal error, the target should not be
			 * sending unsolicited messages on ep 0
			 */
			ath10k_warn(ar, "HTC rx ctrl still processing\n");
			complete(&htc->ctl_resp);
			goto out;
		}

		htc->control_resp_len =
			min_t(int, skb->len,
			      ATH10K_HTC_MAX_CTRL_MSG_LEN);

		memcpy(htc->control_resp_buffer, skb->data,
		       htc->control_resp_len);

		complete(&htc->ctl_resp);
		break;
	case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
		htc->htc_ops.target_send_suspend_complete(ar);
		break;
	default:
		ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
		break;
	}

out:
	kfree_skb(skb);
}

/***************/
/* Init/Deinit */
/***************/

static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
	switch (id) {
	case ATH10K_HTC_SVC_ID_RESERVED:
		return "Reserved";
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		return "Control";
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		return "WMI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
		return "DATA BE";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
		return "DATA BK";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
		return "DATA VI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
		return "DATA VO";
	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
		return "NMI Control";
	case ATH10K_HTC_SVC_ID_NMI_DATA:
		return "NMI Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		return "RAW";
	case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
		return "PKTLOG";
	}

	return "Unknown";
}

static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
	struct ath10k_htc_ep *ep;
	int i;

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
		ep->max_ep_message_len = 0;
		ep->max_tx_queue_depth = 0;
		ep->eid = i;
		ep->htc = htc;
		ep->tx_credit_flow_enabled = true;
	}
}

static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
					   u16 service_id)
{
	u8 allocation = 0;

	/* The WMI control service is the only service with flow control.
	 * Let it have all transmit credits.
	 */
	if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
		allocation = htc->total_transmit_credits;

	return allocation;
}

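/* Wait for the HTC READY message that the target sends on endpoint 0 once
 * the HIF is started.  It announces the number and size of the target's TX
 * credits.  A longer, extended READY message (detected purely by its
 * length) additionally carries the maximum number of messages the target
 * may bundle into a single RX transfer.
 */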
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	unsigned long time_left;
	struct ath10k_htc_msg *msg;
	u16 message_id;

	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (!time_left) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive interrupt for the control response message
		 * even if the buffer was completed. It is suspected
		 * iomap writes unmasking PCI CE irqs aren't propagated
		 * properly in KVM PCI-passthrough sometimes.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling..\n");

		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		time_left =
			wait_for_completion_timeout(&htc->ctl_resp,
						    ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (!time_left)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id = __le16_to_cpu(msg->hdr.message_id);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
	htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	/* The only way to determine if the ready message is an extended
	 * message is from the size.
	 */
	if (htc->control_resp_len >=
	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
		htc->max_msgs_per_htc_bundle =
			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "Extended ready message. RX bundle size: %d\n",
			   htc->max_msgs_per_htc_bundle);
	}

	return 0;
}

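/* Connect an HTC service to an endpoint.  For the pseudo control service
 * endpoint 0 is assigned locally; for everything else a CONNECT_SERVICE
 * message is sent to the target and the response supplies the endpoint id
 * and maximum message size.  Credit based TX flow control is left enabled
 * only for the WMI control service.
 */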
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	unsigned long time_left;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (!time_left) {
		ath10k_err(ar, "Service connect timeout\n");
		return -ETIMEDOUT;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x\n", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id);
	if (status)
		return status;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}

struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
	if (!skb)
		return NULL;

	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));

	/* FW/HTC requires 4-byte aligned streams */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "Unaligned HTC tx skb\n");

	return skb;
}

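/* Tell the target that host side setup is complete.  On SDIO the extended
 * setup-complete message also enables RX bundling and advertises how many
 * bundled messages per transfer the host is willing to accept.
 */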
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	if (ar->hif.bus == ATH10K_BUS_SDIO) {
		/* Extra setup params used by SDIO */
		msg->setup_complete_ext.flags =
			__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
		msg->setup_complete_ext.max_msgs_per_bundled_recv =
			htc->max_msgs_per_htc_bundle;
	}
	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	return 0;
}

/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
	int status;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	spin_lock_init(&htc->tx_lock);

	ath10k_htc_reset_endpoint_states(htc);

	htc->ar = ar;

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err(ar, "could not connect to htc service (%d)\n",
			   status);
		return status;
	}

	init_completion(&htc->ctl_resp);

	return 0;
}