// SPDX-License-Identifier: GPL-2.0-only
/*
 * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 */
/* in cw1200_tx_queues_lock() */
    cw1200_queue_lock(&priv->tx_queue[i]);

/* in cw1200_tx_queues_unlock() */
    cw1200_queue_unlock(&priv->tx_queue[i]);
/* in tx_policy_dump() */
    policy->raw[0] & 0x0F, policy->raw[0] >> 4,
    policy->raw[1] & 0x0F, policy->raw[1] >> 4,
    policy->raw[2] & 0x0F, policy->raw[2] >> 4,
    policy->raw[3] & 0x0F, policy->raw[3] >> 4,
    policy->raw[4] & 0x0F, policy->raw[4] >> 4,
    policy->raw[5] & 0x0F, policy->raw[5] >> 4,
    policy->raw[6] & 0x0F, policy->raw[6] >> 4,
    policy->raw[7] & 0x0F, policy->raw[7] >> 4,
    policy->raw[8] & 0x0F, policy->raw[8] >> 4,
    policy->raw[9] & 0x0F, policy->raw[9] >> 4,
    policy->raw[10] & 0x0F, policy->raw[10] >> 4,
    policy->raw[11] & 0x0F, policy->raw[11] >> 4,
    policy->defined);
/* in tx_policy_build() */
    unsigned limit = priv->short_frame_max_tx_count;

    if (rates[i].idx > rates[i - 1].idx) {
        struct ieee80211_tx_rate tmp = rates[i - 1];
        rates[i - 1] = rates[i];

    /* Re-fill policy trying to keep every requested rate and with ... */
        int left = count - i - 1;
        if (rates[i].count > limit - left)
            rates[i].count = limit - left;
        limit -= rates[i].count;
    /* ...
     * of time (100-200 ms), leading to a noticeable throughput drop.
     * As a workaround, additional g-rates are injected to the
     * ... */
    rates[0].count -= 2;

    /* Inject 1 transmission on lowest g-rate */
    /* Inject 1 transmission on mid-rate */
    --rates[0].count;
    /* Inject 2 transmissions on lowest g-rate */

    policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1;

    rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value;
    off = rateid >> 3;              /* eq. rateid / 8 */
    shift = (rateid & 0x07) << 2;   /* eq. (rateid % 8) * 4 */

    policy->tbl[off] |= __cpu_to_le32(retries << shift);
    policy->retry_count += retries;
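/*
 * Illustrative sketch, not part of the driver: how the off/shift arithmetic
 * above packs one 4-bit retry count per WSM rate id into the policy table.
 * Eight nibbles fit into each 32-bit word, so rateid / 8 picks the word and
 * (rateid % 8) * 4 picks the nibble. The helper name is made up for the
 * example.
 */
static inline void policy_set_retries(__le32 *tbl, unsigned int rateid,
                                      unsigned int retries)
{
    unsigned int off = rateid >> 3;             /* rateid / 8 */
    unsigned int shift = (rateid & 0x07) << 2;  /* (rateid % 8) * 4 */

    tbl[off] |= __cpu_to_le32((retries & 0x0F) << shift);
}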
/* in tx_policy_is_equal() */
    size_t count = wanted->defined >> 1;

    if (wanted->defined > cached->defined)
    if (memcmp(wanted->raw, cached->raw, count))
    if (wanted->defined & 1) {
        if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F))
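/*
 * Note on the comparison above: 'defined' counts 4-bit rate entries, so two
 * policies are compared over defined / 2 whole bytes with memcmp(), plus the
 * low nibble of one trailing byte when 'defined' is odd.
 */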
/* in tx_policy_find() */
    list_for_each_entry(it, &cache->used, link) {
        if (tx_policy_is_equal(wanted, &it->policy))
            return it - cache->cache;
    /* Then - in "free list" */
    list_for_each_entry(it, &cache->free, link) {
        if (tx_policy_is_equal(wanted, &it->policy))
            return it - cache->cache;
    return -1;

/* in tx_policy_use() */
    ++entry->policy.usage_count;
    list_move(&entry->link, &cache->used);

/* in tx_policy_release() */
    int ret = --entry->policy.usage_count;
    list_move(&entry->link, &cache->free);
/* in tx_policy_clean() */
    struct tx_policy_cache *cache = &priv->tx_policy_cache;

    spin_lock_bh(&cache->lock);
    locked = list_empty(&cache->free);
    entry = &cache->cache[idx];
    if (WARN_ON(entry->policy.usage_count)) {
        entry->policy.usage_count = 0;
        list_move(&entry->link, &cache->free);
    memset(&entry->policy, 0, sizeof(entry->policy));
    spin_unlock_bh(&cache->lock);

/* in tx_policy_init() */
    struct tx_policy_cache *cache = &priv->tx_policy_cache;

    spin_lock_init(&cache->lock);
    INIT_LIST_HEAD(&cache->used);
    INIT_LIST_HEAD(&cache->free);
    list_add(&cache->cache[i].link, &cache->free);
/* in tx_policy_get() */
    struct tx_policy_cache *cache = &priv->tx_policy_cache;

    spin_lock_bh(&cache->lock);
    if (WARN_ON_ONCE(list_empty(&cache->free))) {
        spin_unlock_bh(&cache->lock);
    entry = list_entry(cache->free.prev,
    entry->policy = wanted;
    idx = entry - cache->cache;
    tx_policy_dump(&entry->policy);
    tx_policy_use(cache, &cache->cache[idx]);
    if (list_empty(&cache->free)) {
    spin_unlock_bh(&cache->lock);
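/*
 * When no cached policy matches, tx_policy_get() takes the entry at the tail
 * of the free list (cache->free.prev), i.e. the one released longest ago,
 * overwrites it with the wanted policy and marks it used.
 */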
/* in tx_policy_put() */
    struct tx_policy_cache *cache = &priv->tx_policy_cache;

    spin_lock_bh(&cache->lock);
    locked = list_empty(&cache->free);
    usage = tx_policy_release(cache, &cache->cache[idx]);
    spin_unlock_bh(&cache->lock);

/* in tx_policy_upload() */
    struct tx_policy_cache *cache = &priv->tx_policy_cache;

    spin_lock_bh(&cache->lock);
    struct tx_policy *src = &cache->cache[i].policy;
    if (src->retry_count && !src->uploaded) {
        dst->index = i;
        dst->short_retries = priv->short_frame_max_tx_count;
        dst->long_retries = priv->long_frame_max_tx_count;
        dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED |
        memcpy(dst->rate_count_indices, src->tbl,
               sizeof(dst->rate_count_indices));
        src->uploaded = 1;
    spin_unlock_bh(&cache->lock);
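/*
 * Only cache entries that carry retries (src->retry_count) and have not yet
 * been pushed to the firmware are copied into the WSM rate-retry-policy
 * request; each one is then flagged 'uploaded' so it is sent only once.
 */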
/* in cw1200_rate_mask_to_wsm() */
    ret |= BIT(priv->rates[i].hw_value);

/* in cw1200_get_tx_rate() */
    if (rate->idx < 0)
    if (rate->flags & IEEE80211_TX_RC_MCS)
        return &priv->mcs_rates[rate->idx];
    return &priv->hw->wiphy->bands[priv->channel->band]->bitrates[rate->idx];
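/*
 * A rate slot with idx < 0 is treated as empty (no rate); MCS rates are
 * looked up in the driver's private mcs_rates table, while legacy rates come
 * from the mac80211 bitrate table of the current channel's band.
 */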
/* in cw1200_tx_h_calc_link_ids() */
    if (t->sta && t->sta_priv->link_id)
        t->txpriv.raw_link_id =
            t->txpriv.link_id =
            t->sta_priv->link_id;
    else if (priv->mode != NL80211_IFTYPE_AP)
        t->txpriv.raw_link_id =
            t->txpriv.link_id = 0;
    else if (is_multicast_ether_addr(t->da)) {
        if (priv->enable_beacon) {
            t->txpriv.raw_link_id = 0;
            t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM;
        } else {
            t->txpriv.raw_link_id = 0;
            t->txpriv.link_id = 0;
        }
    } else {
        t->txpriv.link_id = cw1200_find_link_id(priv, t->da);
        if (!t->txpriv.link_id)
            t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da);
        if (!t->txpriv.link_id) {
            wiphy_err(priv->hw->wiphy,
            return -ENOENT;
        }
        t->txpriv.raw_link_id = t->txpriv.link_id;
    }
    if (t->txpriv.raw_link_id)
        priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp =
    if (t->sta && (t->sta->uapsd_queues & BIT(t->queue)))
        t->txpriv.link_id = CW1200_LINK_ID_UAPSD;
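/*
 * Summary of the link-id selection above: a known station keeps its existing
 * link id; non-AP interfaces always use id 0; in AP mode, multicast frames
 * are tagged CW1200_LINK_ID_AFTER_DTIM while beaconing so they can be
 * released after the DTIM beacon, and unicast frames to unknown peers get a
 * link id found or allocated on demand. Frames on a U-APSD queue are finally
 * retagged with CW1200_LINK_ID_UAPSD.
 */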
/* in cw1200_tx_h_pm() */
    if (ieee80211_is_auth(t->hdr->frame_control)) {
        u32 mask = ~BIT(t->txpriv.raw_link_id);

        spin_lock_bh(&priv->ps_state_lock);
        priv->sta_asleep_mask &= mask;
        priv->pspoll_mask &= mask;
        spin_unlock_bh(&priv->ps_state_lock);

/* in cw1200_tx_h_calc_tid() */
    if (ieee80211_is_data_qos(t->hdr->frame_control)) {
        u8 *qos = ieee80211_get_qos_ctl(t->hdr);
        t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
    } else if (ieee80211_is_data(t->hdr->frame_control)) {
        t->txpriv.tid = 0;

/* in cw1200_tx_h_crypt() */
    if (!t->tx_info->control.hw_key ||
        !ieee80211_has_protected(t->hdr->frame_control))

    t->hdrlen += t->tx_info->control.hw_key->iv_len;
    skb_put(t->skb, t->tx_info->control.hw_key->icv_len);

    if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
        skb_put(t->skb, 8); /* MIC space */
/* in cw1200_tx_h_align() */
    size_t offset = (size_t)t->skb->data & 3;

        wiphy_err(priv->hw->wiphy,
        return -EINVAL;

    if (skb_headroom(t->skb) < offset) {
        wiphy_err(priv->hw->wiphy,
                  skb_headroom(t->skb));
        return -ENOMEM;
    skb_push(t->skb, offset);
    t->hdrlen += offset;
    t->txpriv.offset += offset;
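/*
 * The misaligned byte count is pushed back into the skb headroom so the
 * payload handed to the device starts on a 4-byte boundary; hdrlen and
 * txpriv.offset are bumped by the same amount so the padding can be stripped
 * again in cw1200_skb_dtor().
 */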
/* in cw1200_tx_h_action() */
    struct ieee80211_mgmt *mgmt =
        (struct ieee80211_mgmt *)t->hdr;
    if (ieee80211_is_action(t->hdr->frame_control) &&
        mgmt->u.action.category == WLAN_CATEGORY_BACK)

/* in cw1200_tx_h_wsm() */
    if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
        wiphy_err(priv->hw->wiphy,
                  skb_headroom(t->skb));

    wsm = skb_push(t->skb, sizeof(struct wsm_tx));
    t->txpriv.offset += sizeof(struct wsm_tx);
    wsm->hdr.len = __cpu_to_le16(t->skb->len);
    wsm->hdr.id = __cpu_to_le16(0x0004);
    wsm->queue_id = wsm_queue_id_to_wsm(t->queue);
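/*
 * The WSM transmit request header is built in place in the skb headroom:
 * hdr.len covers the whole resulting buffer, 0x0004 is the WSM TX request
 * message id, and the WMM queue is translated into the firmware's queue
 * numbering.
 */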
/* in cw1200_tx_h_bt() */
    if (!priv->bt_present)

    if (ieee80211_is_nullfunc(t->hdr->frame_control)) {
    } else if (ieee80211_is_data(t->hdr->frame_control)) {
        u8 *payload = &t->skb->data[t->hdrlen];
    } else if (ieee80211_is_assoc_req(t->hdr->frame_control) ||
               ieee80211_is_reassoc_req(t->hdr->frame_control)) {
        struct ieee80211_mgmt *mgt_frame =
            (struct ieee80211_mgmt *)t->hdr;

        if (le16_to_cpu(mgt_frame->u.assoc_req.listen_interval) <
            priv->listen_interval) {
            priv->listen_interval,
            mgt_frame->u.assoc_req.listen_interval);
            mgt_frame->u.assoc_req.listen_interval = cpu_to_le16(priv->listen_interval);

    if (ieee80211_is_action(t->hdr->frame_control))
    else if (ieee80211_is_mgmt(t->hdr->frame_control))
    else if (wsm->queue_id == WSM_QUEUE_VOICE)
    else if (wsm->queue_id == WSM_QUEUE_VIDEO)

    wsm->flags |= priority << 1;
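/*
 * Two things happen here: (re)association requests advertising a listen
 * interval shorter than priv->listen_interval are rewritten to the driver's
 * value, and a Bluetooth-coexistence priority derived from the frame type or
 * WMM queue is packed into wsm->flags starting at bit 1.
 */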
/* in cw1200_tx_h_rate_policy() */
    t->txpriv.rate_id = tx_policy_get(priv,
        t->tx_info->control.rates, IEEE80211_TX_MAX_RATES,
    if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID)
        return -EFAULT;

    wsm->flags |= t->txpriv.rate_id << 4;

    t->rate = cw1200_get_tx_rate(priv,
        &t->tx_info->control.rates[0]),
    wsm->max_tx_rate = t->rate->hw_value;

    if (t->rate->flags & IEEE80211_TX_RC_MCS) {
        if (cw1200_ht_greenfield(&priv->ht_info))
            wsm->ht_tx_parameters |=
        wsm->ht_tx_parameters |=

    if (queue_work(priv->workqueue,
                   &priv->tx_policy_upload_work) <= 0) {
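/*
 * The index of the cached rate policy is carried to the firmware in bits 7:4
 * of wsm->flags, and the first (highest-priority) rate of the mac80211 rate
 * set doubles as max_tx_rate; tx_policy_upload_work is queued to push
 * not-yet-uploaded policies to the device.
 */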
/* in cw1200_tx_h_pm_state() */
    if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM &&
        !priv->buffered_multicasts) {
        priv->buffered_multicasts = true;
        if (priv->sta_asleep_mask)
            queue_work(priv->workqueue,
                       &priv->multicast_start_work);
    }

    if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID)
        was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++;

/* in cw1200_tx() */
    struct cw1200_common *priv = dev->priv;
    .hdr = (struct ieee80211_hdr *)skb->data,
    if (priv->bh_error)
    t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control);
    t.sta = control->sta;
    t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv;
    skb->len, t.queue, t.txpriv.link_id,
    ret = -ENOMEM;
    wsm->flags |= flags;
    spin_lock_bh(&priv->ps_state_lock);
    BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
    spin_unlock_bh(&priv->ps_state_lock);
/* in cw1200_handle_action_rx() */
    struct ieee80211_mgmt *mgmt = (void *)skb->data;

    if (mgmt->u.action.category == WLAN_CATEGORY_BACK)

/* in cw1200_handle_pspoll() */
    struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;

    if (priv->join_status != CW1200_JOIN_STATUS_AP)
    if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN))
    sta = ieee80211_find_sta(priv->vif, pspoll->ta);
    sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv;
    link_id = sta_priv->link_id;
    pspoll_mask = BIT(sta_priv->link_id);
    priv->pspoll_mask |= pspoll_mask;
    if (cw1200_queue_get_num_queued(&priv->tx_queue[i],
    pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd");
/* in cw1200_tx_confirm_cb() */
    u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id);
    struct cw1200_queue *queue = &priv->tx_queue[queue_id];

    arg->status, arg->ack_failures);

    if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
    if (arg->status)
        pr_debug("TX failed: %d.\n", arg->status);

    if ((arg->status == WSM_REQUEUE) &&
        (arg->flags & WSM_TX_STATUS_REQUEUE)) {
        wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n",
                   cw1200_queue_get_generation(arg->packet_id) + 1,
                   priv->sta_asleep_mask);
        cw1200_queue_requeue(queue, arg->packet_id);
        spin_lock_bh(&priv->ps_state_lock);
        priv->buffered_multicasts = true;
        if (priv->sta_asleep_mask) {
            queue_work(priv->workqueue,
                       &priv->multicast_start_work);
        spin_unlock_bh(&priv->ps_state_lock);
    } else if (!cw1200_queue_get_skb(queue, arg->packet_id,
        int tx_count = arg->ack_failures;

        if (cw1200_ht_greenfield(&priv->ht_info))
        spin_lock(&priv->bss_loss_lock);
        if (priv->bss_loss_state &&
            arg->packet_id == priv->bss_loss_confirm_id) {
            if (arg->status) {
        spin_unlock(&priv->bss_loss_lock);

        if (!arg->status) {
            tx->flags |= IEEE80211_TX_STAT_ACK;
        if (arg->flags & WSM_TX_STATUS_AGGREGATION) {
            /* tx->flags |= IEEE80211_TX_STAT_AMPDU; */

        if (tx->status.rates[i].count >= tx_count) {
            tx->status.rates[i].count = tx_count;
        tx_count -= tx->status.rates[i].count;
        if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS)
            tx->status.rates[i].flags |= ht_flags;
        tx->status.rates[i].count = 0;
        tx->status.rates[i].idx = -1;

        if (tx->control.hw_key) {
            skb_trim(skb, skb->len - tx->control.hw_key->icv_len);
            if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
                skb_trim(skb, skb->len - 8); /* MIC space */
        cw1200_queue_remove(queue, arg->packet_id);
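/*
 * On a confirmed frame the total number of transmit attempts is distributed
 * over the mac80211 status.rates[] entries in order; once the budget is
 * exhausted the remaining entries are cleared (count 0, idx -1), and MCS
 * entries inherit the accumulated HT flags. The ICV (and TKIP MIC) space
 * reserved in cw1200_tx_h_crypt() is trimmed off again before the skb is
 * removed from the queue and its status reported back.
 */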
/* in cw1200_notify_buffered_tx() */
    buffered = priv->link_id_db[link_id - 1].buffered;

    spin_lock_bh(&priv->ps_state_lock);
    still_buffered = --buffered[tid];
    spin_unlock_bh(&priv->ps_state_lock);

    hdr = (struct ieee80211_hdr *)skb->data;
    sta = ieee80211_find_sta(priv->vif, hdr->addr1);

/* in cw1200_skb_dtor() */
    skb_pull(skb, txpriv->offset);
    if (txpriv->rate_id != CW1200_INVALID_RATE_ID) {
        txpriv->raw_link_id, txpriv->tid);
        tx_policy_put(priv, txpriv->rate_id);
    ieee80211_tx_status(priv->hw, skb);
/* in cw1200_rx_cb() */
    struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
    struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
    bool p2p = priv->vif && priv->vif->p2p;

    hdr->flag = 0;

    if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {

    entry = &priv->link_id_db[link_id - 1];
    if (entry->status == CW1200_LINK_SOFT &&
        ieee80211_is_data(frame->frame_control))
    entry->timestamp = jiffies;

        ieee80211_is_action(frame->frame_control) &&
        (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
        pr_debug("[RX] Going to MAP&RESET link ID\n");
        WARN_ON(work_pending(&priv->linkid_reset_work));
        memcpy(&priv->action_frame_sa[0],
        priv->action_linkid = 0;
        schedule_work(&priv->linkid_reset_work);

        ieee80211_is_action(frame->frame_control) &&
        (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
        WARN_ON(work_pending(&priv->linkid_reset_work));
        memcpy(&priv->action_frame_sa[0],
        priv->action_linkid = link_id;
        schedule_work(&priv->linkid_reset_work);

    if (arg->status) {
        if (arg->status == WSM_STATUS_MICFAILURE) {
            pr_debug("[RX] MIC failure.\n");
            hdr->flag |= RX_FLAG_MMIC_ERROR;
        } else if (arg->status == WSM_STATUS_NO_KEY_FOUND) {
            pr_debug("[RX] No key found.\n");
        pr_debug("[RX] Receive failure: %d.\n",
                 arg->status);

    if (skb->len < sizeof(struct ieee80211_pspoll)) {
        wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is smaller than IEEE header.\n");

    if (ieee80211_is_pspoll(frame->frame_control))

    hdr->band = ((arg->channel_number & 0xff00) ||
                 (arg->channel_number > 14)) ?
    hdr->freq = ieee80211_channel_to_frequency(
                    arg->channel_number,
                    hdr->band);

    if (arg->rx_rate >= 14) {
        hdr->encoding = RX_ENC_HT;
        hdr->rate_idx = arg->rx_rate - 14;
    } else if (arg->rx_rate >= 4) {
        hdr->rate_idx = arg->rx_rate - 2;
    } else {
        hdr->rate_idx = arg->rx_rate;
    }

    hdr->signal = (s8)arg->rcpi_rssi;
    hdr->antenna = 0;
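/*
 * Rate mapping note, based on the driver's rate tables: WSM indices 14 and
 * up are HT MCS values (hence MCS = rx_rate - 14), 0-3 are the CCK b-rates,
 * and the OFDM g-rates start at 6 with ids 4 and 5 unused; the "- 2"
 * adjustment therefore turns a WSM OFDM id into the corresponding index in
 * the band's bitrate table.
 */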
    hdrlen = ieee80211_hdrlen(frame->frame_control);

    if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
        hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;

        switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
            hdr->flag |= RX_FLAG_MMIC_STRIPPED;
                WSM_RX_STATUS_ENCRYPTION(arg->flags));

        if (arg->status == WSM_STATUS_MICFAILURE)

        if (skb->len < hdrlen + iv_len + icv_len) {
            wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is smaller than crypto headers.\n");

        skb_trim(skb, skb->len - icv_len);
        memmove(skb->data + iv_len, skb->data, hdrlen);

    if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) {
        memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
        hdr->mactime = le64_to_cpu(hdr->mactime);
        if (skb->len >= 8)
            skb_trim(skb, skb->len - 8);
    } else {
        hdr->mactime = 0;
    }

    if (arg->flags & WSM_RX_STATUS_AGGREGATE)

    if (ieee80211_is_action(frame->frame_control) &&
        (arg->flags & WSM_RX_STATUS_ADDRESS1)) {
    } else if (ieee80211_is_beacon(frame->frame_control) &&
               !arg->status && priv->vif &&
               ether_addr_equal(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid)) {
            (skb->data))->u.beacon.variable;
        size_t ies_len = skb->len - (ies - (u8 *)(skb->data));

        if (priv->join_dtim_period != tim->dtim_period) {
            priv->join_dtim_period = tim->dtim_period;
            queue_work(priv->workqueue,
                       &priv->set_beacon_wakeup_period_work);

        if (priv->disable_beacon_filter &&
            (priv->vif->bss_conf.assoc ||
             priv->vif->bss_conf.ibss_joined)) {
            priv->disable_beacon_filter = false;
            queue_work(priv->workqueue,
                       &priv->update_filtering_work);

    if (ieee80211_is_auth(frame->frame_control))
    else if (ieee80211_is_deauth(frame->frame_control))
    cw1200_pm_stay_awake(&priv->pm_state, grace_period);

    spin_lock_bh(&priv->ps_state_lock);
    /* Double-check status with lock held */
    if (entry->status == CW1200_LINK_SOFT)
        skb_queue_tail(&entry->rx_queue, skb);
    else
        ieee80211_rx_irqsafe(priv->hw, skb);
    spin_unlock_bh(&priv->ps_state_lock);

    ieee80211_rx_irqsafe(priv->hw, skb);
/* in cw1200_alloc_key() */
    idx = ffs(~priv->key_map) - 1;
        return -1;
    priv->key_map |= BIT(idx);
    priv->keys[idx].index = idx;

/* in cw1200_free_key() */
    BUG_ON(!(priv->key_map & BIT(idx)));
    memset(&priv->keys[idx], 0, sizeof(priv->keys[idx]));
    priv->key_map &= ~BIT(idx);

/* in cw1200_free_keys() */
    memset(&priv->keys, 0, sizeof(priv->keys));
    priv->key_map = 0;

/* in cw1200_upload_keys() */
    if (priv->key_map & BIT(idx)) {
        ret = wsm_add_key(priv, &priv->keys[idx]);
/* in cw1200_link_id_reset() */
    if (!priv->action_linkid) {
        &priv->action_frame_sa[0]);
        flush_workqueue(priv->workqueue);

        spin_lock_bh(&priv->ps_state_lock);
        priv->link_id_db[temp_linkid - 1].prev_status =
            priv->link_id_db[temp_linkid - 1].status;
        priv->link_id_db[temp_linkid - 1].status =
        spin_unlock_bh(&priv->ps_state_lock);

        if (queue_work(priv->workqueue,
                       &priv->link_id_work) <= 0)
    } else {
        spin_lock_bh(&priv->ps_state_lock);
        priv->link_id_db[priv->action_linkid - 1].prev_status =
            priv->link_id_db[priv->action_linkid - 1].status;
        priv->link_id_db[priv->action_linkid - 1].status =
        spin_unlock_bh(&priv->ps_state_lock);

        if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
        flush_workqueue(priv->workqueue);
/* in cw1200_find_link_id() */
    spin_lock_bh(&priv->ps_state_lock);
    if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) &&
        priv->link_id_db[i].status) {
        priv->link_id_db[i].timestamp = jiffies;
    spin_unlock_bh(&priv->ps_state_lock);
/* in cw1200_alloc_link_id() */
    spin_lock_bh(&priv->ps_state_lock);
    if (!priv->link_id_db[i].status) {
    } else if (priv->link_id_db[i].status != CW1200_LINK_HARD &&
               !priv->tx_queue_stats.link_map_cache[i + 1]) {
        now - priv->link_id_db[i].timestamp;

    struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1];

    entry->status = CW1200_LINK_RESERVE;
    memcpy(&entry->mac, mac, ETH_ALEN);
    memset(&entry->buffered, 0, CW1200_MAX_TID);
    skb_queue_head_init(&entry->rx_queue);

    if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
    wiphy_info(priv->hw->wiphy,
    spin_unlock_bh(&priv->ps_state_lock);
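/*
 * Allocation policy, as the code above suggests: a slot whose status is 0 is
 * taken first; failing that, a non-CW1200_LINK_HARD slot with nothing left
 * for it in the TX queues may be recycled, preferring the one idle the
 * longest (largest now - timestamp). The chosen entry is reserved
 * (CW1200_LINK_RESERVE) and link_id_work is queued to map it in firmware.
 */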
/* in cw1200_link_id_work() */
    cw1200_link_id_gc_work(&priv->link_id_gc_work.work);
/* in cw1200_link_id_gc_work() */
    unsigned long next_gc = -1;

    if (priv->join_status != CW1200_JOIN_STATUS_AP)

    spin_lock_bh(&priv->ps_state_lock);
    if (priv->link_id_db[i].status == CW1200_LINK_RESERVE ||
        (priv->link_id_db[i].status == CW1200_LINK_HARD &&
         !(priv->link_id_map & mask))) {
        if (priv->link_id_map & mask) {
            priv->sta_asleep_mask &= ~mask;
            priv->pspoll_mask &= ~mask;
        priv->link_id_map |= mask;
        if (priv->link_id_db[i].status != CW1200_LINK_HARD)
            priv->link_id_db[i].status = CW1200_LINK_SOFT;
        memcpy(map_link.mac_addr, priv->link_id_db[i].mac,
        spin_unlock_bh(&priv->ps_state_lock);
        spin_lock_bh(&priv->ps_state_lock);
    } else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) {
        ttl = priv->link_id_db[i].timestamp - now +
            priv->link_id_db[i].status = CW1200_LINK_OFF;
            priv->link_id_map &= ~mask;
            priv->sta_asleep_mask &= ~mask;
            priv->pspoll_mask &= ~mask;
            spin_unlock_bh(&priv->ps_state_lock);
            spin_lock_bh(&priv->ps_state_lock);
    } else if (priv->link_id_db[i].status == CW1200_LINK_RESET ||
               priv->link_id_db[i].status ==
        int status = priv->link_id_db[i].status;
        priv->link_id_db[i].status =
            priv->link_id_db[i].prev_status;
        priv->link_id_db[i].timestamp = now;
        spin_unlock_bh(&priv->ps_state_lock);
            priv->link_id_db[i].mac,
        spin_lock_bh(&priv->ps_state_lock);
    skb_queue_purge(&priv->link_id_db[i].rx_queue);
    spin_unlock_bh(&priv->ps_state_lock);

    if (next_gc != -1)
        queue_delayed_work(priv->workqueue,
                           &priv->link_id_gc_work, next_gc);
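/*
 * next_gc starts at (unsigned long)-1 and is lowered toward the nearest
 * pending expiry while the table is scanned; if anything is still due to
 * expire, the work requeues itself with that delay so soft link ids keep
 * getting garbage collected.
 */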