Lines matching full:dev (each hit keeps its source line number and enclosing function)
16 struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet); in mt76x02_pre_tbtt_tasklet() local
17 struct mt76_dev *mdev = &dev->mt76; in mt76x02_pre_tbtt_tasklet()
18 struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD]; in mt76x02_pre_tbtt_tasklet()
23 if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) in mt76x02_pre_tbtt_tasklet()
26 mt76x02_resync_beacon_timer(dev); in mt76x02_pre_tbtt_tasklet()
29 mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff); in mt76x02_pre_tbtt_tasklet()
30 dev->beacon_data_count = 0; in mt76x02_pre_tbtt_tasklet()
32 ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), in mt76x02_pre_tbtt_tasklet()
34 mt76x02_update_beacon_iter, dev); in mt76x02_pre_tbtt_tasklet()
36 mt76_wr(dev, MT_BCN_BYPASS_MASK, in mt76x02_pre_tbtt_tasklet()
37 0xff00 | ~(0xff00 >> dev->beacon_data_count)); in mt76x02_pre_tbtt_tasklet()
44 mt76x02_enqueue_buffered_bc(dev, &data, 8); in mt76x02_pre_tbtt_tasklet()
62 mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL); in mt76x02_pre_tbtt_tasklet()
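
The hits above all come from mt76x02_pre_tbtt_tasklet(), which recovers its device from the embedded tasklet_struct. Below is a minimal, self-contained sketch of the tasklet_setup()/from_tasklet() pairing it relies on; my_dev, my_pre_tbtt_fn and the other my_* names are hypothetical, not part of the driver.

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_dev {
        struct tasklet_struct pre_tbtt_tasklet;
        int beacon_data_count;
};

static struct my_dev *gdev;

static void my_pre_tbtt_fn(struct tasklet_struct *t)
{
        /* from_tasklet() is container_of() in disguise: it maps the
         * embedded tasklet_struct back to the enclosing device struct. */
        struct my_dev *dev = from_tasklet(dev, t, pre_tbtt_tasklet);

        dev->beacon_data_count = 0;
}

static int __init my_init(void)
{
        gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
        if (!gdev)
                return -ENOMEM;
        tasklet_setup(&gdev->pre_tbtt_tasklet, my_pre_tbtt_fn);
        tasklet_schedule(&gdev->pre_tbtt_tasklet);
        return 0;
}

static void __exit my_exit(void)
{
        tasklet_kill(&gdev->pre_tbtt_tasklet);
        kfree(gdev);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");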
67 static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en) in mt76x02e_pre_tbtt_enable() argument
70 tasklet_enable(&dev->mt76.pre_tbtt_tasklet); in mt76x02e_pre_tbtt_enable()
72 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); in mt76x02e_pre_tbtt_enable()
75 static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en) in mt76x02e_beacon_enable() argument
77 mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en); in mt76x02e_beacon_enable()
79 mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); in mt76x02e_beacon_enable()
81 mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); in mt76x02e_beacon_enable()
84 void mt76x02e_init_beacon_config(struct mt76x02_dev *dev) in mt76x02e_init_beacon_config() argument
93 dev->beacon_ops = &beacon_ops; in mt76x02e_init_beacon_config()
96 mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, in mt76x02e_init_beacon_config()
98 mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER, in mt76x02e_init_beacon_config()
100 mt76_wr(dev, MT_INT_TIMER_EN, 0); in mt76x02e_init_beacon_config()
102 mt76x02_init_beacon_config(dev); in mt76x02e_init_beacon_config()
107 mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, in mt76x02_init_rx_queue() argument
112 err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize, in mt76x02_init_rx_queue()
117 mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx)); in mt76x02_init_rx_queue()
122 static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev) in mt76x02_process_tx_status_fifo() argument
127 while (kfifo_get(&dev->txstatus_fifo, &stat)) in mt76x02_process_tx_status_fifo()
128 mt76x02_send_tx_status(dev, &stat, &update); in mt76x02_process_tx_status_fifo()
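
mt76x02_process_tx_status_fifo() above drains a kfifo that the interrupt path fills with TX status words. A self-contained sketch of that producer/consumer shape follows, using the statically sized DEFINE_KFIFO() variant rather than the driver's kfifo_init() on a devm-allocated buffer; my_tx_status and its fields are hypothetical.

#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/types.h>

struct my_tx_status {
        u8 wcid;
        u8 retries;
};

/* Fixed-size ring; the element count must be a power of two. Safe for
 * one producer (irq path) and one consumer (worker) without locking. */
static DEFINE_KFIFO(status_fifo, struct my_tx_status, 16);

static void my_push_status(u8 wcid, u8 retries)
{
        struct my_tx_status stat = { .wcid = wcid, .retries = retries };

        /* kfifo_put() returns 0 when the ring is full; dropping a stale
         * status report is the usual policy in that case. */
        if (!kfifo_put(&status_fifo, stat))
                pr_warn("tx status fifo full, dropping\n");
}

static void my_process_status_fifo(void)
{
        struct my_tx_status stat;

        /* Same loop shape as mt76x02_process_tx_status_fifo() above. */
        while (kfifo_get(&status_fifo, &stat))
                pr_info("wcid %u retried %u times\n", stat.wcid,
                        stat.retries);
}

static int __init my_init(void)
{
        my_push_status(1, 3);
        my_process_status_fifo();
        return 0;
}

static void __exit my_exit(void) { }

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");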
133 struct mt76x02_dev *dev; in mt76x02_tx_worker() local
135 dev = container_of(w, struct mt76x02_dev, mt76.tx_worker); in mt76x02_tx_worker()
137 mt76x02_mac_poll_tx_status(dev, false); in mt76x02_tx_worker()
138 mt76x02_process_tx_status_fifo(dev); in mt76x02_tx_worker()
140 mt76_txq_schedule_all(&dev->mphy); in mt76x02_tx_worker()
145 struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev, in mt76x02_poll_tx() local
149 mt76x02_mac_poll_tx_status(dev, false); in mt76x02_poll_tx()
151 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); in mt76x02_poll_tx()
153 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); in mt76x02_poll_tx()
156 mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL); in mt76x02_poll_tx()
158 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); in mt76x02_poll_tx()
160 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); in mt76x02_poll_tx()
162 mt76_worker_schedule(&dev->mt76.tx_worker); in mt76x02_poll_tx()
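
mt76x02_poll_tx() above is a NAPI poll callback for TX completions: reap, call napi_complete_done(), re-enable the interrupt, then reap once more to close the race with a late completion. Below is a sketch of that shape, hosted on a dummy netdev the way this driver era does it (netif_tx_napi_add() was later renamed netif_napi_add_tx()); all my_* names are hypothetical and the CSR writes are stubbed.

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_dev {
        struct net_device napi_dev;     /* dummy netdev hosting NAPI */
        struct napi_struct tx_napi;
};

static struct my_dev *gdev;

static void my_irq_enable(struct my_dev *dev)
{
        /* on real hardware: set the TX-done bit in the irq-mask CSR */
}

static int my_poll_tx(struct napi_struct *napi, int budget)
{
        struct my_dev *dev = container_of(napi, struct my_dev, tx_napi);

        /* first pass: reap completed TX descriptors here */

        /* napi_complete_done() returns false if NAPI was rescheduled in
         * the meantime; only re-enable the interrupt when it wins */
        if (napi_complete_done(napi, 0))
                my_irq_enable(dev);

        /* second reap pass closes the race with a completion that landed
         * just before the interrupt came back on */
        return 0;
}

static int __init my_init(void)
{
        gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
        if (!gdev)
                return -ENOMEM;
        init_dummy_netdev(&gdev->napi_dev);
        netif_tx_napi_add(&gdev->napi_dev, &gdev->tx_napi, my_poll_tx,
                          NAPI_POLL_WEIGHT);
        napi_enable(&gdev->tx_napi);
        napi_schedule(&gdev->tx_napi);
        return 0;
}

static void __exit my_exit(void)
{
        napi_disable(&gdev->tx_napi);
        netif_napi_del(&gdev->tx_napi);
        kfree(gdev);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");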
167 int mt76x02_dma_init(struct mt76x02_dev *dev) in mt76x02_dma_init() argument
177 status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL); in mt76x02_dma_init()
181 dev->mt76.tx_worker.fn = mt76x02_tx_worker; in mt76x02_dma_init()
182 tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet); in mt76x02_dma_init()
184 spin_lock_init(&dev->txstatus_fifo_lock); in mt76x02_dma_init()
185 kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); in mt76x02_dma_init()
187 mt76_dma_attach(&dev->mt76); in mt76x02_dma_init()
189 mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); in mt76x02_dma_init()
192 ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i), in mt76x02_dma_init()
199 ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT, in mt76x02_dma_init()
204 ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU, in mt76x02_dma_init()
209 mt76x02_irq_enable(dev, in mt76x02_dma_init()
217 ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, in mt76x02_dma_init()
222 q = &dev->mt76.q_rx[MT_RXQ_MAIN]; in mt76x02_dma_init()
224 ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE, in mt76x02_dma_init()
229 ret = mt76_init_queues(dev, mt76_dma_rx_poll); in mt76x02_dma_init()
233 netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, in mt76x02_dma_init()
235 napi_enable(&dev->mt76.tx_napi); in mt76x02_dma_init()
243 struct mt76x02_dev *dev; in mt76x02_rx_poll_complete() local
245 dev = container_of(mdev, struct mt76x02_dev, mt76); in mt76x02_rx_poll_complete()
246 mt76x02_irq_enable(dev, MT_INT_RX_DONE(q)); in mt76x02_rx_poll_complete()
252 struct mt76x02_dev *dev = dev_instance; in mt76x02_irq_handler() local
255 intr = mt76_rr(dev, MT_INT_SOURCE_CSR); in mt76x02_irq_handler()
256 intr &= dev->mt76.mmio.irqmask; in mt76x02_irq_handler()
257 mt76_wr(dev, MT_INT_SOURCE_CSR, intr); in mt76x02_irq_handler()
259 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) in mt76x02_irq_handler()
262 trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); in mt76x02_irq_handler()
268 mt76x02_irq_disable(dev, mask); in mt76x02_irq_handler()
271 napi_schedule(&dev->mt76.napi[0]); in mt76x02_irq_handler()
274 napi_schedule(&dev->mt76.napi[1]); in mt76x02_irq_handler()
277 tasklet_schedule(&dev->mt76.pre_tbtt_tasklet); in mt76x02_irq_handler()
281 if (dev->mt76.csa_complete) in mt76x02_irq_handler()
282 mt76_csa_finish(&dev->mt76); in mt76x02_irq_handler()
284 mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]); in mt76x02_irq_handler()
288 mt76x02_mac_poll_tx_status(dev, true); in mt76x02_irq_handler()
291 napi_schedule(&dev->mt76.tx_napi); in mt76x02_irq_handler()
294 tasklet_schedule(&dev->dfs_pd.dfs_tasklet); in mt76x02_irq_handler()
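
mt76x02_irq_handler() above follows the classic read/mask/ack/dispatch shape: read the interrupt source CSR, drop bits the driver has masked off, ack what remains, then hand RX work to NAPI with that source disabled until the poll completes. A hedged fragment of that pattern with hypothetical registers and names; on real hardware it would be registered at probe time with request_irq(irq, my_irq_handler, IRQF_SHARED, "my-dev", dev), and a real driver would also write the updated mask to an irq-mask CSR under a spinlock, as mt76x02_irq_disable() does.

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

#define MY_INT_SOURCE_CSR       0x0200  /* hypothetical register offset */
#define MY_INT_RX_DONE          BIT(0)

struct my_dev {
        void __iomem *regs;
        u32 irqmask;
        struct napi_struct napi;
};

static irqreturn_t my_irq_handler(int irq, void *dev_instance)
{
        struct my_dev *dev = dev_instance;
        u32 intr;

        /* read pending sources, keep only the ones we have unmasked,
         * and ack exactly what we are about to handle */
        intr = readl(dev->regs + MY_INT_SOURCE_CSR);
        intr &= dev->irqmask;
        writel(intr, dev->regs + MY_INT_SOURCE_CSR);

        if (!intr)
                return IRQ_NONE;

        if (intr & MY_INT_RX_DONE) {
                /* mask RX until the NAPI poll re-enables it, mirroring
                 * mt76x02_irq_handler()/mt76x02_rx_poll_complete() */
                dev->irqmask &= ~MY_INT_RX_DONE;
                napi_schedule(&dev->napi);
        }

        return IRQ_HANDLED;
}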
300 static void mt76x02_dma_enable(struct mt76x02_dev *dev) in mt76x02_dma_enable() argument
304 mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); in mt76x02_dma_enable()
305 mt76x02_wait_for_wpdma(&dev->mt76, 1000); in mt76x02_dma_enable()
311 mt76_set(dev, MT_WPDMA_GLO_CFG, val); in mt76x02_dma_enable()
312 mt76_clear(dev, MT_WPDMA_GLO_CFG, in mt76x02_dma_enable()
316 void mt76x02_dma_disable(struct mt76x02_dev *dev) in mt76x02_dma_disable() argument
318 u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG); in mt76x02_dma_disable()
324 mt76_wr(dev, MT_WPDMA_GLO_CFG, val); in mt76x02_dma_disable()
328 void mt76x02_mac_start(struct mt76x02_dev *dev) in mt76x02_mac_start() argument
330 mt76x02_mac_reset_counters(dev); in mt76x02_mac_start()
331 mt76x02_dma_enable(dev); in mt76x02_mac_start()
332 mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); in mt76x02_mac_start()
333 mt76_wr(dev, MT_MAC_SYS_CTRL, in mt76x02_mac_start()
336 mt76x02_irq_enable(dev, in mt76x02_mac_start()
342 static bool mt76x02_tx_hang(struct mt76x02_dev *dev) in mt76x02_tx_hang() argument
349 q = dev->mphy.q_tx[i]; in mt76x02_tx_hang()
354 prev_dma_idx = dev->mt76.tx_dma_idx[i]; in mt76x02_tx_hang()
356 dev->mt76.tx_dma_idx[i] = dma_idx; in mt76x02_tx_hang()
369 struct mt76x02_dev *dev = hw->priv; in mt76x02_key_sync() local
380 mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key); in mt76x02_key_sync()
383 static void mt76x02_reset_state(struct mt76x02_dev *dev) in mt76x02_reset_state() argument
387 lockdep_assert_held(&dev->mt76.mutex); in mt76x02_reset_state()
389 clear_bit(MT76_STATE_RUNNING, &dev->mphy.state); in mt76x02_reset_state()
392 ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL); in mt76x02_reset_state()
402 wcid = rcu_dereference_protected(dev->mt76.wcid[i], in mt76x02_reset_state()
403 lockdep_is_held(&dev->mt76.mutex)); in mt76x02_reset_state()
407 rcu_assign_pointer(dev->mt76.wcid[i], NULL); in mt76x02_reset_state()
415 __mt76_sta_remove(&dev->mt76, vif, sta); in mt76x02_reset_state()
419 dev->mt76.vif_mask = 0; in mt76x02_reset_state()
420 dev->mt76.beacon_mask = 0; in mt76x02_reset_state()
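
mt76x02_reset_state() above tears down RCU-published wcid entries in the standard order: look the pointer up with rcu_dereference_protected() while holding the dev mutex, null the slot, and only free after a grace period. A self-contained sketch of that teardown (the real code then calls __mt76_sta_remove()); wcid_tbl, my_sta and my_remove_sta are hypothetical.

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#define N_WCID 8

struct my_sta {
        int idx;
};

static struct my_sta __rcu *wcid_tbl[N_WCID];
static DEFINE_MUTEX(tbl_mutex);

static void my_remove_sta(int i)
{
        struct my_sta *sta;

        mutex_lock(&tbl_mutex);
        sta = rcu_dereference_protected(wcid_tbl[i],
                                        lockdep_is_held(&tbl_mutex));
        RCU_INIT_POINTER(wcid_tbl[i], NULL);
        mutex_unlock(&tbl_mutex);

        if (!sta)
                return;

        synchronize_rcu();      /* readers can no longer see the entry */
        kfree(sta);
}

static int __init my_init(void)
{
        struct my_sta *sta = kzalloc(sizeof(*sta), GFP_KERNEL);

        if (!sta)
                return -ENOMEM;
        sta->idx = 0;
        rcu_assign_pointer(wcid_tbl[0], sta);
        my_remove_sta(0);
        return 0;
}

static void __exit my_exit(void) { }

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");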
423 static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) in mt76x02_watchdog_reset() argument
425 u32 mask = dev->mt76.mmio.irqmask; in mt76x02_watchdog_reset()
426 bool restart = dev->mt76.mcu_ops->mcu_restart; in mt76x02_watchdog_reset()
429 ieee80211_stop_queues(dev->mt76.hw); in mt76x02_watchdog_reset()
430 set_bit(MT76_RESET, &dev->mphy.state); in mt76x02_watchdog_reset()
432 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); in mt76x02_watchdog_reset()
433 mt76_worker_disable(&dev->mt76.tx_worker); in mt76x02_watchdog_reset()
434 napi_disable(&dev->mt76.tx_napi); in mt76x02_watchdog_reset()
436 mt76_for_each_q_rx(&dev->mt76, i) { in mt76x02_watchdog_reset()
437 napi_disable(&dev->mt76.napi[i]); in mt76x02_watchdog_reset()
440 mutex_lock(&dev->mt76.mutex); in mt76x02_watchdog_reset()
442 dev->mcu_timeout = 0; in mt76x02_watchdog_reset()
444 mt76x02_reset_state(dev); in mt76x02_watchdog_reset()
446 if (dev->mt76.beacon_mask) in mt76x02_watchdog_reset()
447 mt76_clear(dev, MT_BEACON_TIME_CFG, in mt76x02_watchdog_reset()
451 mt76x02_irq_disable(dev, mask); in mt76x02_watchdog_reset()
454 mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); in mt76x02_watchdog_reset()
455 mt76_wr(dev, MT_MAC_SYS_CTRL, 0); in mt76x02_watchdog_reset()
456 mt76_clear(dev, MT_WPDMA_GLO_CFG, in mt76x02_watchdog_reset()
459 mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff); in mt76x02_watchdog_reset()
462 mt76_set(dev, 0x734, 0x3); in mt76x02_watchdog_reset()
465 mt76_mcu_restart(dev); in mt76x02_watchdog_reset()
467 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true); in mt76x02_watchdog_reset()
469 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); in mt76x02_watchdog_reset()
471 mt76_for_each_q_rx(&dev->mt76, i) { in mt76x02_watchdog_reset()
472 mt76_queue_rx_reset(dev, i); in mt76x02_watchdog_reset()
475 mt76_tx_status_check(&dev->mt76, NULL, true); in mt76x02_watchdog_reset()
477 mt76x02_mac_start(dev); in mt76x02_watchdog_reset()
479 if (dev->ed_monitor) in mt76x02_watchdog_reset()
480 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); in mt76x02_watchdog_reset()
482 if (dev->mt76.beacon_mask && !restart) in mt76x02_watchdog_reset()
483 mt76_set(dev, MT_BEACON_TIME_CFG, in mt76x02_watchdog_reset()
487 mt76x02_irq_enable(dev, mask); in mt76x02_watchdog_reset()
489 mutex_unlock(&dev->mt76.mutex); in mt76x02_watchdog_reset()
491 clear_bit(MT76_RESET, &dev->mphy.state); in mt76x02_watchdog_reset()
493 mt76_worker_enable(&dev->mt76.tx_worker); in mt76x02_watchdog_reset()
494 napi_enable(&dev->mt76.tx_napi); in mt76x02_watchdog_reset()
495 napi_schedule(&dev->mt76.tx_napi); in mt76x02_watchdog_reset()
497 tasklet_enable(&dev->mt76.pre_tbtt_tasklet); in mt76x02_watchdog_reset()
499 mt76_for_each_q_rx(&dev->mt76, i) { in mt76x02_watchdog_reset()
500 napi_enable(&dev->mt76.napi[i]); in mt76x02_watchdog_reset()
501 napi_schedule(&dev->mt76.napi[i]); in mt76x02_watchdog_reset()
505 set_bit(MT76_RESTART, &dev->mphy.state); in mt76x02_watchdog_reset()
506 mt76x02_mcu_function_select(dev, Q_SELECT, 1); in mt76x02_watchdog_reset()
507 ieee80211_restart_hw(dev->mt76.hw); in mt76x02_watchdog_reset()
509 ieee80211_wake_queues(dev->mt76.hw); in mt76x02_watchdog_reset()
510 mt76_txq_schedule_all(&dev->mphy); in mt76x02_watchdog_reset()
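
mt76x02_watchdog_reset() above is careful about ordering: quiesce every bottom half that can touch the DMA rings, perform the hardware reset under the dev mutex, then restart everything in reverse. A condensed sketch of that quiesce -> reset -> restart skeleton; every my_* helper is a hypothetical stand-in for the driver's CSR accesses.

#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>

#define MY_RESET 0

struct my_dev {
        unsigned long state;
        struct mutex mutex;
        struct tasklet_struct pre_tbtt_tasklet;
        struct napi_struct tx_napi;
        struct napi_struct rx_napi;
};

/* all five hardware helpers are stand-ins for CSR writes */
static void my_irq_disable_all(struct my_dev *dev) { }
static void my_dma_halt(struct my_dev *dev) { }
static void my_flush_tx_rx(struct my_dev *dev) { }
static void my_mac_start(struct my_dev *dev) { }
static void my_irq_restore(struct my_dev *dev) { }

static void my_watchdog_reset(struct my_dev *dev)
{
        /* 1. quiesce: flag the reset, then park every bottom half that
         * could still touch the DMA rings */
        set_bit(MY_RESET, &dev->state);
        tasklet_disable(&dev->pre_tbtt_tasklet);
        napi_disable(&dev->tx_napi);
        napi_disable(&dev->rx_napi);

        mutex_lock(&dev->mutex);

        /* 2. with everything parked: mask irqs, halt DMA, flush both
         * directions, then bring the MAC back up */
        my_irq_disable_all(dev);
        my_dma_halt(dev);
        my_flush_tx_rx(dev);
        my_mac_start(dev);
        my_irq_restore(dev);

        mutex_unlock(&dev->mutex);

        /* 3. restart in reverse order of the quiesce */
        clear_bit(MY_RESET, &dev->state);
        napi_enable(&dev->tx_napi);
        napi_schedule(&dev->tx_napi);
        tasklet_enable(&dev->pre_tbtt_tasklet);
        napi_enable(&dev->rx_napi);
        napi_schedule(&dev->rx_napi);
}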
517 struct mt76x02_dev *dev = hw->priv; in mt76x02_reconfig_complete() local
522 clear_bit(MT76_RESTART, &dev->mphy.state); in mt76x02_reconfig_complete()
526 static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) in mt76x02_check_tx_hang() argument
528 if (test_bit(MT76_RESTART, &dev->mphy.state)) in mt76x02_check_tx_hang()
531 if (mt76x02_tx_hang(dev)) { in mt76x02_check_tx_hang()
532 if (++dev->tx_hang_check >= MT_TX_HANG_TH) in mt76x02_check_tx_hang()
535 dev->tx_hang_check = 0; in mt76x02_check_tx_hang()
538 if (dev->mcu_timeout) in mt76x02_check_tx_hang()
544 mt76x02_watchdog_reset(dev); in mt76x02_check_tx_hang()
546 dev->tx_hang_reset++; in mt76x02_check_tx_hang()
547 dev->tx_hang_check = 0; in mt76x02_check_tx_hang()
548 memset(dev->mt76.tx_dma_idx, 0xff, in mt76x02_check_tx_hang()
549 sizeof(dev->mt76.tx_dma_idx)); in mt76x02_check_tx_hang()
554 struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, in mt76x02_wdt_work() local
557 mt76x02_check_tx_hang(dev); in mt76x02_wdt_work()
559 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work, in mt76x02_wdt_work()
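
mt76x02_wdt_work() above is a self-rearming delayed work: run the hang check, then queue itself again. A minimal sketch of that pattern using plain schedule_delayed_work() in place of the driver's ieee80211_queue_delayed_work(); the 100 ms period is an assumed value, not taken from the source.

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
        struct delayed_work wdt_work;
};

static struct my_dev *gdev;

static void my_wdt_work(struct work_struct *work)
{
        struct my_dev *dev = container_of(to_delayed_work(work),
                                          struct my_dev, wdt_work);

        /* run the hang check here, then re-arm ourselves */
        schedule_delayed_work(&dev->wdt_work, msecs_to_jiffies(100));
}

static int __init my_init(void)
{
        gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
        if (!gdev)
                return -ENOMEM;
        INIT_DELAYED_WORK(&gdev->wdt_work, my_wdt_work);
        schedule_delayed_work(&gdev->wdt_work, msecs_to_jiffies(100));
        return 0;
}

static void __exit my_exit(void)
{
        /* handles the self-requeue: cancels and waits in one call */
        cancel_delayed_work_sync(&gdev->wdt_work);
        kfree(gdev);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");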