1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_SUSPEND_SLEEP 100
34 #define WL1271_WAKEUP_TIMEOUT 500
35 
36 static char *fwlog_param;
37 static int fwlog_mem_blocks = -1;
38 static int bug_on_recovery = -1;
39 static int no_recovery     = -1;
40 
41 static void __wl1271_op_remove_interface(struct wl1271 *wl,
42 					 struct ieee80211_vif *vif,
43 					 bool reset_tx_queues);
44 static void wlcore_op_stop_locked(struct wl1271 *wl);
45 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
46 
47 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
48 {
49 	int ret;
50 
51 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
52 		return -EINVAL;
53 
54 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
55 		return 0;
56 
57 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
58 		return 0;
59 
60 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
61 	if (ret < 0)
62 		return ret;
63 
64 	wl1271_info("Association completed.");
65 	return 0;
66 }
67 
68 static void wl1271_reg_notify(struct wiphy *wiphy,
69 			      struct regulatory_request *request)
70 {
71 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
72 	struct wl1271 *wl = hw->priv;
73 
74 	/* copy the current dfs region */
75 	if (request)
76 		wl->dfs_region = request->dfs_region;
77 
78 	wlcore_regdomain_config(wl);
79 }
80 
81 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
82 				   bool enable)
83 {
84 	int ret = 0;
85 
86 	/* we should hold wl->mutex */
87 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
88 	if (ret < 0)
89 		goto out;
90 
91 	if (enable)
92 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
93 	else
94 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
95 out:
96 	return ret;
97 }
98 
99 /*
100  * this function is called when the rx_streaming interval
101  * has been changed or rx_streaming should be disabled
102  */
103 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
104 {
105 	int ret = 0;
106 	int period = wl->conf.rx_streaming.interval;
107 
108 	/* don't reconfigure if rx_streaming is disabled */
109 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
110 		goto out;
111 
112 	/* reconfigure/disable according to new streaming_period */
113 	if (period &&
114 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
115 	    (wl->conf.rx_streaming.always ||
116 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
117 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
118 	else {
119 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
120 		/* don't cancel_work_sync since we might deadlock */
121 		del_timer_sync(&wlvif->rx_streaming_timer);
122 	}
123 out:
124 	return ret;
125 }
126 
127 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
128 {
129 	int ret;
130 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
131 						rx_streaming_enable_work);
132 	struct wl1271 *wl = wlvif->wl;
133 
134 	mutex_lock(&wl->mutex);
135 
136 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
137 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
138 	    (!wl->conf.rx_streaming.always &&
139 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
140 		goto out;
141 
142 	if (!wl->conf.rx_streaming.interval)
143 		goto out;
144 
145 	ret = pm_runtime_get_sync(wl->dev);
146 	if (ret < 0) {
147 		pm_runtime_put_noidle(wl->dev);
148 		goto out;
149 	}
150 
151 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 	if (ret < 0)
153 		goto out_sleep;
154 
155 	/* stop it after some time of inactivity */
156 	mod_timer(&wlvif->rx_streaming_timer,
157 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
158 
159 out_sleep:
160 	pm_runtime_mark_last_busy(wl->dev);
161 	pm_runtime_put_autosuspend(wl->dev);
162 out:
163 	mutex_unlock(&wl->mutex);
164 }
165 
166 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
167 {
168 	int ret;
169 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
170 						rx_streaming_disable_work);
171 	struct wl1271 *wl = wlvif->wl;
172 
173 	mutex_lock(&wl->mutex);
174 
175 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
176 		goto out;
177 
178 	ret = pm_runtime_get_sync(wl->dev);
179 	if (ret < 0) {
180 		pm_runtime_put_noidle(wl->dev);
181 		goto out;
182 	}
183 
184 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
185 	if (ret)
186 		goto out_sleep;
187 
188 out_sleep:
189 	pm_runtime_mark_last_busy(wl->dev);
190 	pm_runtime_put_autosuspend(wl->dev);
191 out:
192 	mutex_unlock(&wl->mutex);
193 }
194 
195 static void wl1271_rx_streaming_timer(struct timer_list *t)
196 {
197 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
198 	struct wl1271 *wl = wlvif->wl;
199 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
200 }
201 
202 /* wl->mutex must be taken */
203 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
204 {
205 	/* if the watchdog is not armed, don't do anything */
206 	if (wl->tx_allocated_blocks == 0)
207 		return;
208 
209 	cancel_delayed_work(&wl->tx_watchdog_work);
210 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
211 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
212 }
213 
214 static void wlcore_rc_update_work(struct work_struct *work)
215 {
216 	int ret;
217 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
218 						rc_update_work);
219 	struct wl1271 *wl = wlvif->wl;
220 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
221 
222 	mutex_lock(&wl->mutex);
223 
224 	if (unlikely(wl->state != WLCORE_STATE_ON))
225 		goto out;
226 
227 	ret = pm_runtime_get_sync(wl->dev);
228 	if (ret < 0) {
229 		pm_runtime_put_noidle(wl->dev);
230 		goto out;
231 	}
232 
233 	if (ieee80211_vif_is_mesh(vif)) {
234 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
235 						     true, wlvif->sta.hlid);
236 		if (ret < 0)
237 			goto out_sleep;
238 	} else {
239 		wlcore_hw_sta_rc_update(wl, wlvif);
240 	}
241 
242 out_sleep:
243 	pm_runtime_mark_last_busy(wl->dev);
244 	pm_runtime_put_autosuspend(wl->dev);
245 out:
246 	mutex_unlock(&wl->mutex);
247 }
248 
249 static void wl12xx_tx_watchdog_work(struct work_struct *work)
250 {
251 	struct delayed_work *dwork;
252 	struct wl1271 *wl;
253 
254 	dwork = to_delayed_work(work);
255 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
256 
257 	mutex_lock(&wl->mutex);
258 
259 	if (unlikely(wl->state != WLCORE_STATE_ON))
260 		goto out;
261 
262 	/* Tx went out in the meantime - everything is ok */
263 	if (unlikely(wl->tx_allocated_blocks == 0))
264 		goto out;
265 
266 	/*
267 	 * if a ROC is in progress, we might not have any Tx for a long
268 	 * time (e.g. pending Tx on the non-ROC channels)
269 	 */
270 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
271 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
272 			     wl->conf.tx.tx_watchdog_timeout);
273 		wl12xx_rearm_tx_watchdog_locked(wl);
274 		goto out;
275 	}
276 
277 	/*
278 	 * if a scan is in progress, we might not have any Tx for a long
279 	 * time
280 	 */
281 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
282 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
283 			     wl->conf.tx.tx_watchdog_timeout);
284 		wl12xx_rearm_tx_watchdog_locked(wl);
285 		goto out;
286 	}
287 
288 	/*
289 	 * AP might cache a frame for a long time for a sleeping station,
290 	 * so rearm the timer if there's an AP interface with stations. If
291 	 * Tx is genuinely stuck we will hopefully discover it when all
292 	 * stations are removed due to inactivity.
293 	 */
294 	if (wl->active_sta_count) {
295 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
296 			     "%d stations",
297 			      wl->conf.tx.tx_watchdog_timeout,
298 			      wl->active_sta_count);
299 		wl12xx_rearm_tx_watchdog_locked(wl);
300 		goto out;
301 	}
302 
303 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
304 		     wl->conf.tx.tx_watchdog_timeout);
305 	wl12xx_queue_recovery_work(wl);
306 
307 out:
308 	mutex_unlock(&wl->mutex);
309 }
310 
311 static void wlcore_adjust_conf(struct wl1271 *wl)
312 {
313 
314 	if (fwlog_param) {
315 		if (!strcmp(fwlog_param, "continuous")) {
316 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
317 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
318 		} else if (!strcmp(fwlog_param, "dbgpins")) {
319 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
320 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
321 		} else if (!strcmp(fwlog_param, "disable")) {
322 			wl->conf.fwlog.mem_blocks = 0;
323 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
324 		} else {
325 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
326 		}
327 	}
328 
329 	if (bug_on_recovery != -1)
330 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
331 
332 	if (no_recovery != -1)
333 		wl->conf.recovery.no_recovery = (u8) no_recovery;
334 }
335 
336 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
337 					struct wl12xx_vif *wlvif,
338 					u8 hlid, u8 tx_pkts)
339 {
340 	bool fw_ps;
341 
342 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
343 
344 	/*
345 	 * Wake up from high-level PS if the STA is asleep with too few
346 	 * packets in FW or if the STA is awake.
347 	 */
348 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
349 		wl12xx_ps_link_end(wl, wlvif, hlid);
350 
351 	/*
352 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
353 	 * Make an exception if this is the only connected link. In this
354 	 * case FW-memory congestion is less of a problem.
355 	 * Note that a single connected STA means 2*ap_count + 1 active links,
356 	 * since we must account for the global and broadcast AP links
357 	 * for each AP. The "fw_ps" check assures us the other link is a STA
358 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
359 	 */
360 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
361 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
362 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
363 }
364 
365 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
366 					   struct wl12xx_vif *wlvif,
367 					   struct wl_fw_status *status)
368 {
369 	unsigned long cur_fw_ps_map;
370 	u8 hlid;
371 
372 	cur_fw_ps_map = status->link_ps_bitmap;
373 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
374 		wl1271_debug(DEBUG_PSM,
375 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
376 			     wl->ap_fw_ps_map, cur_fw_ps_map,
377 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
378 
379 		wl->ap_fw_ps_map = cur_fw_ps_map;
380 	}
381 
382 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
383 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
384 					    wl->links[hlid].allocated_pkts);
385 }
386 
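/*
 * Read the raw FW status block from the chip, convert it into the
 * driver's wl_fw_status layout and update the per-queue/per-link Tx
 * accounting, the Tx watchdog and the AP link PS state derived from it.
 */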
387 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
388 {
389 	struct wl12xx_vif *wlvif;
390 	u32 old_tx_blk_count = wl->tx_blocks_available;
391 	int avail, freed_blocks;
392 	int i;
393 	int ret;
394 	struct wl1271_link *lnk;
395 
396 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
397 				   wl->raw_fw_status,
398 				   wl->fw_status_len, false);
399 	if (ret < 0)
400 		return ret;
401 
402 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
403 
404 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
405 		     "drv_rx_counter = %d, tx_results_counter = %d)",
406 		     status->intr,
407 		     status->fw_rx_counter,
408 		     status->drv_rx_counter,
409 		     status->tx_results_counter);
410 
411 	for (i = 0; i < NUM_TX_QUEUES; i++) {
412 		/* prevent wrap-around in freed-packets counter */
413 		wl->tx_allocated_pkts[i] -=
414 				(status->counters.tx_released_pkts[i] -
415 				wl->tx_pkts_freed[i]) & 0xff;
416 
417 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
418 	}
419 
420 
421 	for_each_set_bit(i, wl->links_map, wl->num_links) {
422 		u8 diff;
423 		lnk = &wl->links[i];
424 
425 		/* prevent wrap-around in freed-packets counter */
426 		diff = (status->counters.tx_lnk_free_pkts[i] -
427 		       lnk->prev_freed_pkts) & 0xff;
428 
429 		if (diff == 0)
430 			continue;
431 
432 		lnk->allocated_pkts -= diff;
433 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
434 
435 		/* accumulate the prev_freed_pkts counter */
436 		lnk->total_freed_pkts += diff;
437 	}
438 
439 	/* prevent wrap-around in total blocks counter */
440 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
441 		freed_blocks = status->total_released_blks -
442 			       wl->tx_blocks_freed;
443 	else
444 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
445 			       status->total_released_blks;
446 
447 	wl->tx_blocks_freed = status->total_released_blks;
448 
449 	wl->tx_allocated_blocks -= freed_blocks;
450 
451 	/*
452 	 * If the FW freed some blocks:
453 	 * If we still have allocated blocks - re-arm the timer, Tx is
454 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
455 	 */
456 	if (freed_blocks) {
457 		if (wl->tx_allocated_blocks)
458 			wl12xx_rearm_tx_watchdog_locked(wl);
459 		else
460 			cancel_delayed_work(&wl->tx_watchdog_work);
461 	}
462 
463 	avail = status->tx_total - wl->tx_allocated_blocks;
464 
465 	/*
466 	 * The FW might change the total number of TX memblocks before
467 	 * we get a notification about blocks being released. Thus, the
468 	 * available blocks calculation might yield a temporary result
469 	 * which is lower than the actual available blocks. Keeping in
470 	 * mind that only blocks that were allocated can be moved from
471 	 * TX to RX, tx_blocks_available should never decrease here.
472 	 */
473 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
474 				      avail);
475 
476 	/* if more blocks are available now, tx work can be scheduled */
477 	if (wl->tx_blocks_available > old_tx_blk_count)
478 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
479 
480 	/* for AP update num of allocated TX blocks per link and ps status */
481 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
482 		wl12xx_irq_update_links_status(wl, wlvif, status);
483 	}
484 
485 	/* update the host-chipset time offset */
486 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
487 		(s64)(status->fw_localtime);
488 
489 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
490 
491 	return 0;
492 }
493 
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
495 {
496 	struct sk_buff *skb;
497 
498 	/* Pass all received frames to the network stack */
499 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 		ieee80211_rx_ni(wl->hw, skb);
501 
502 	/* Return sent skbs to the network stack */
503 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 		ieee80211_tx_status_ni(wl->hw, skb);
505 }
506 
507 static void wl1271_netstack_work(struct work_struct *work)
508 {
509 	struct wl1271 *wl =
510 		container_of(work, struct wl1271, netstack_work);
511 
512 	do {
513 		wl1271_flush_deferred_work(wl);
514 	} while (skb_queue_len(&wl->deferred_rx_queue));
515 }
516 
517 #define WL1271_IRQ_MAX_LOOPS 256
518 
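/*
 * Interrupt bottom half, called with wl->mutex held: loops over the FW
 * status, handling Rx, Tx completions and firmware events, and returns
 * -EIO when a HW/SW watchdog interrupt requires a recovery.
 */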
519 static int wlcore_irq_locked(struct wl1271 *wl)
520 {
521 	int ret = 0;
522 	u32 intr;
523 	int loopcount = WL1271_IRQ_MAX_LOOPS;
524 	bool done = false;
525 	unsigned int defer_count;
526 	unsigned long flags;
527 
528 	/*
529 	 * In case an edge-triggered interrupt must be used, we cannot iterate
530 	 * more than once without introducing race conditions with the hardirq.
531 	 */
532 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
533 		loopcount = 1;
534 
535 	wl1271_debug(DEBUG_IRQ, "IRQ work");
536 
537 	if (unlikely(wl->state != WLCORE_STATE_ON))
538 		goto out;
539 
540 	ret = pm_runtime_get_sync(wl->dev);
541 	if (ret < 0) {
542 		pm_runtime_put_noidle(wl->dev);
543 		goto out;
544 	}
545 
546 	while (!done && loopcount--) {
547 		/*
548 		 * In order to avoid a race with the hardirq, clear the flag
549 		 * before acknowledging the chip.
550 		 */
551 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
552 		smp_mb__after_atomic();
553 
554 		ret = wlcore_fw_status(wl, wl->fw_status);
555 		if (ret < 0)
556 			goto out;
557 
558 		wlcore_hw_tx_immediate_compl(wl);
559 
560 		intr = wl->fw_status->intr;
561 		intr &= WLCORE_ALL_INTR_MASK;
562 		if (!intr) {
563 			done = true;
564 			continue;
565 		}
566 
567 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
568 			wl1271_error("HW watchdog interrupt received! starting recovery.");
569 			wl->watchdog_recovery = true;
570 			ret = -EIO;
571 
572 			/* restarting the chip. ignore any other interrupt. */
573 			goto out;
574 		}
575 
576 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
577 			wl1271_error("SW watchdog interrupt received! "
578 				     "starting recovery.");
579 			wl->watchdog_recovery = true;
580 			ret = -EIO;
581 
582 			/* restarting the chip. ignore any other interrupt. */
583 			goto out;
584 		}
585 
586 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
587 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
588 
589 			ret = wlcore_rx(wl, wl->fw_status);
590 			if (ret < 0)
591 				goto out;
592 
593 			/* Check if any tx blocks were freed */
594 			spin_lock_irqsave(&wl->wl_lock, flags);
595 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
596 			    wl1271_tx_total_queue_count(wl) > 0) {
597 				spin_unlock_irqrestore(&wl->wl_lock, flags);
598 				/*
599 				 * In order to avoid starvation of the TX path,
600 				 * call the work function directly.
601 				 */
602 				ret = wlcore_tx_work_locked(wl);
603 				if (ret < 0)
604 					goto out;
605 			} else {
606 				spin_unlock_irqrestore(&wl->wl_lock, flags);
607 			}
608 
609 			/* check for tx results */
610 			ret = wlcore_hw_tx_delayed_compl(wl);
611 			if (ret < 0)
612 				goto out;
613 
614 			/* Make sure the deferred queues don't get too long */
615 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
616 				      skb_queue_len(&wl->deferred_rx_queue);
617 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
618 				wl1271_flush_deferred_work(wl);
619 		}
620 
621 		if (intr & WL1271_ACX_INTR_EVENT_A) {
622 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
623 			ret = wl1271_event_handle(wl, 0);
624 			if (ret < 0)
625 				goto out;
626 		}
627 
628 		if (intr & WL1271_ACX_INTR_EVENT_B) {
629 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
630 			ret = wl1271_event_handle(wl, 1);
631 			if (ret < 0)
632 				goto out;
633 		}
634 
635 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
636 			wl1271_debug(DEBUG_IRQ,
637 				     "WL1271_ACX_INTR_INIT_COMPLETE");
638 
639 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
640 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
641 	}
642 
643 	pm_runtime_mark_last_busy(wl->dev);
644 	pm_runtime_put_autosuspend(wl->dev);
645 
646 out:
647 	return ret;
648 }
649 
650 static irqreturn_t wlcore_irq(int irq, void *cookie)
651 {
652 	int ret;
653 	unsigned long flags;
654 	struct wl1271 *wl = cookie;
655 
656 	/* signal the ELP completion, if a wakeup is waiting on it */
657 	spin_lock_irqsave(&wl->wl_lock, flags);
658 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
659 	if (wl->elp_compl) {
660 		complete(wl->elp_compl);
661 		wl->elp_compl = NULL;
662 	}
663 
664 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
665 		/* don't enqueue a work right now. mark it as pending */
666 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
667 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
668 		disable_irq_nosync(wl->irq);
669 		pm_wakeup_event(wl->dev, 0);
670 		spin_unlock_irqrestore(&wl->wl_lock, flags);
671 		return IRQ_HANDLED;
672 	}
673 	spin_unlock_irqrestore(&wl->wl_lock, flags);
674 
675 	/* TX might be handled here, avoid redundant work */
676 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
677 	cancel_work_sync(&wl->tx_work);
678 
679 	mutex_lock(&wl->mutex);
680 
681 	ret = wlcore_irq_locked(wl);
682 	if (ret)
683 		wl12xx_queue_recovery_work(wl);
684 
685 	spin_lock_irqsave(&wl->wl_lock, flags);
686 	/* In case TX was not handled here, queue TX work */
687 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
688 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
689 	    wl1271_tx_total_queue_count(wl) > 0)
690 		ieee80211_queue_work(wl->hw, &wl->tx_work);
691 	spin_unlock_irqrestore(&wl->wl_lock, flags);
692 
693 	mutex_unlock(&wl->mutex);
694 
695 	return IRQ_HANDLED;
696 }
697 
698 struct vif_counter_data {
699 	u8 counter;
700 
701 	struct ieee80211_vif *cur_vif;
702 	bool cur_vif_running;
703 };
704 
705 static void wl12xx_vif_count_iter(void *data, u8 *mac,
706 				  struct ieee80211_vif *vif)
707 {
708 	struct vif_counter_data *counter = data;
709 
710 	counter->counter++;
711 	if (counter->cur_vif == vif)
712 		counter->cur_vif_running = true;
713 }
714 
715 /* caller must not hold wl->mutex, as it might deadlock */
716 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
717 			       struct ieee80211_vif *cur_vif,
718 			       struct vif_counter_data *data)
719 {
720 	memset(data, 0, sizeof(*data));
721 	data->cur_vif = cur_vif;
722 
723 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
724 					    wl12xx_vif_count_iter, data);
725 }
726 
727 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
728 {
729 	const struct firmware *fw;
730 	const char *fw_name;
731 	enum wl12xx_fw_type fw_type;
732 	int ret;
733 
734 	if (plt) {
735 		fw_type = WL12XX_FW_TYPE_PLT;
736 		fw_name = wl->plt_fw_name;
737 	} else {
738 		/*
739 		 * we can't call wl12xx_get_vif_count() here because
740 		 * wl->mutex is taken, so use the cached last_vif_count value
741 		 */
742 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
743 			fw_type = WL12XX_FW_TYPE_MULTI;
744 			fw_name = wl->mr_fw_name;
745 		} else {
746 			fw_type = WL12XX_FW_TYPE_NORMAL;
747 			fw_name = wl->sr_fw_name;
748 		}
749 	}
750 
751 	if (wl->fw_type == fw_type)
752 		return 0;
753 
754 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
755 
756 	ret = request_firmware(&fw, fw_name, wl->dev);
757 
758 	if (ret < 0) {
759 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
760 		return ret;
761 	}
762 
763 	if (fw->size % 4) {
764 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
765 			     fw->size);
766 		ret = -EILSEQ;
767 		goto out;
768 	}
769 
770 	vfree(wl->fw);
771 	wl->fw_type = WL12XX_FW_TYPE_NONE;
772 	wl->fw_len = fw->size;
773 	wl->fw = vmalloc(wl->fw_len);
774 
775 	if (!wl->fw) {
776 		wl1271_error("could not allocate memory for the firmware");
777 		ret = -ENOMEM;
778 		goto out;
779 	}
780 
781 	memcpy(wl->fw, fw->data, wl->fw_len);
782 	ret = 0;
783 	wl->fw_type = fw_type;
784 out:
785 	release_firmware(fw);
786 
787 	return ret;
788 }
789 
790 void wl12xx_queue_recovery_work(struct wl1271 *wl)
791 {
792 	/* Avoid a recursive recovery */
793 	if (wl->state == WLCORE_STATE_ON) {
794 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
795 				  &wl->flags));
796 
797 		wl->state = WLCORE_STATE_RESTARTING;
798 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
799 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
800 	}
801 }
802 
803 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
804 {
805 	size_t len;
806 
807 	/* Make sure we have enough room */
808 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
809 
810 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
811 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
812 	wl->fwlog_size += len;
813 
814 	return len;
815 }
816 
817 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
818 {
819 	u32 end_of_log = 0;
820 	int error;
821 
822 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
823 		return;
824 
825 	wl1271_info("Reading FW panic log");
826 
827 	/*
828 	 * Make sure the chip is awake and the logger isn't active.
829 	 * Do not send a stop fwlog command if the fw is hung or if
830 	 * dbgpins are used (due to some fw bug).
831 	 */
832 	error = pm_runtime_get_sync(wl->dev);
833 	if (error < 0) {
834 		pm_runtime_put_noidle(wl->dev);
835 		return;
836 	}
837 	if (!wl->watchdog_recovery &&
838 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
839 		wl12xx_cmd_stop_fwlog(wl);
840 
841 	/* Traverse the memory blocks linked list */
842 	do {
843 		end_of_log = wlcore_event_fw_logger(wl);
844 		if (end_of_log == 0) {
845 			msleep(100);
846 			end_of_log = wlcore_event_fw_logger(wl);
847 		}
848 	} while (end_of_log != 0);
849 }
850 
851 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
852 				   u8 hlid, struct ieee80211_sta *sta)
853 {
854 	struct wl1271_station *wl_sta;
855 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
856 
857 	wl_sta = (void *)sta->drv_priv;
858 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
859 
860 	/*
861 	 * increment the initial seq number on recovery to account for
862 	 * transmitted packets that we haven't yet got in the FW status
863 	 */
864 	if (wlvif->encryption_type == KEY_GEM)
865 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
866 
867 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
868 		wl_sta->total_freed_pkts += sqn_recovery_padding;
869 }
870 
871 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
872 					struct wl12xx_vif *wlvif,
873 					u8 hlid, const u8 *addr)
874 {
875 	struct ieee80211_sta *sta;
876 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
877 
878 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
879 		    is_zero_ether_addr(addr)))
880 		return;
881 
882 	rcu_read_lock();
883 	sta = ieee80211_find_sta(vif, addr);
884 	if (sta)
885 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
886 	rcu_read_unlock();
887 }
888 
889 static void wlcore_print_recovery(struct wl1271 *wl)
890 {
891 	u32 pc = 0;
892 	u32 hint_sts = 0;
893 	int ret;
894 
895 	wl1271_info("Hardware recovery in progress. FW ver: %s",
896 		    wl->chip.fw_ver_str);
897 
898 	/* change partitions momentarily so we can read the FW pc */
899 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
900 	if (ret < 0)
901 		return;
902 
903 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
904 	if (ret < 0)
905 		return;
906 
907 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
908 	if (ret < 0)
909 		return;
910 
911 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
912 				pc, hint_sts, ++wl->recovery_count);
913 
914 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
915 }
916 
917 
918 static void wl1271_recovery_work(struct work_struct *work)
919 {
920 	struct wl1271 *wl =
921 		container_of(work, struct wl1271, recovery_work);
922 	struct wl12xx_vif *wlvif;
923 	struct ieee80211_vif *vif;
924 	int error;
925 
926 	mutex_lock(&wl->mutex);
927 
928 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
929 		goto out_unlock;
930 
931 	error = pm_runtime_get_sync(wl->dev);
932 	if (error < 0) {
933 		wl1271_warning("Enable for recovery failed");
934 		pm_runtime_put_noidle(wl->dev);
935 	}
936 	wlcore_disable_interrupts_nosync(wl);
937 
938 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
939 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
940 			wl12xx_read_fwlog_panic(wl);
941 		wlcore_print_recovery(wl);
942 	}
943 
944 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
945 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
946 
947 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
948 
949 	if (wl->conf.recovery.no_recovery) {
950 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
951 		goto out_unlock;
952 	}
953 
954 	/* Prevent spurious TX during FW restart */
955 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
956 
957 	/* reboot the chipset */
958 	while (!list_empty(&wl->wlvif_list)) {
959 		wlvif = list_first_entry(&wl->wlvif_list,
960 				       struct wl12xx_vif, list);
961 		vif = wl12xx_wlvif_to_vif(wlvif);
962 
963 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
964 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
965 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
966 						    vif->bss_conf.bssid);
967 		}
968 
969 		__wl1271_op_remove_interface(wl, vif, false);
970 	}
971 
972 	wlcore_op_stop_locked(wl);
973 	pm_runtime_mark_last_busy(wl->dev);
974 	pm_runtime_put_autosuspend(wl->dev);
975 
976 	ieee80211_restart_hw(wl->hw);
977 
978 	/*
979 	 * It's safe to enable TX now - the queues are stopped after a request
980 	 * to restart the HW.
981 	 */
982 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
983 
984 out_unlock:
985 	wl->watchdog_recovery = false;
986 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
987 	mutex_unlock(&wl->mutex);
988 }
989 
990 static int wlcore_fw_wakeup(struct wl1271 *wl)
991 {
992 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
993 }
994 
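/* Allocate the raw/converted FW status buffers and the Tx result buffer. */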
995 static int wl1271_setup(struct wl1271 *wl)
996 {
997 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
998 	if (!wl->raw_fw_status)
999 		goto err;
1000 
1001 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1002 	if (!wl->fw_status)
1003 		goto err;
1004 
1005 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1006 	if (!wl->tx_res_if)
1007 		goto err;
1008 
1009 	return 0;
1010 err:
1011 	kfree(wl->fw_status);
1012 	kfree(wl->raw_fw_status);
1013 	return -ENOMEM;
1014 }
1015 
1016 static int wl12xx_set_power_on(struct wl1271 *wl)
1017 {
1018 	int ret;
1019 
1020 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1021 	ret = wl1271_power_on(wl);
1022 	if (ret < 0)
1023 		goto out;
1024 	msleep(WL1271_POWER_ON_SLEEP);
1025 	wl1271_io_reset(wl);
1026 	wl1271_io_init(wl);
1027 
1028 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1029 	if (ret < 0)
1030 		goto fail;
1031 
1032 	/* ELP module wake up */
1033 	ret = wlcore_fw_wakeup(wl);
1034 	if (ret < 0)
1035 		goto fail;
1036 
1037 out:
1038 	return ret;
1039 
1040 fail:
1041 	wl1271_power_off(wl);
1042 	return ret;
1043 }
1044 
1045 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1046 {
1047 	int ret = 0;
1048 
1049 	ret = wl12xx_set_power_on(wl);
1050 	if (ret < 0)
1051 		goto out;
1052 
1053 	/*
1054 	 * For wl127x based devices we could use the default block
1055 	 * size (512 bytes), but due to a bug in the sdio driver, we
1056 	 * need to set it explicitly after the chip is powered on.  To
1057 	 * simplify the code and since the performance impact is
1058 	 * negligible, we use the same block size for all different
1059 	 * chip types.
1060 	 *
1061 	 * Check if the bus supports blocksize alignment and, if it
1062 	 * doesn't, make sure we don't have the quirk.
1063 	 */
1064 	if (!wl1271_set_block_size(wl))
1065 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1066 
1067 	/* TODO: make sure the lower driver has set things up correctly */
1068 
1069 	ret = wl1271_setup(wl);
1070 	if (ret < 0)
1071 		goto out;
1072 
1073 	ret = wl12xx_fetch_firmware(wl, plt);
1074 	if (ret < 0) {
1075 		kfree(wl->fw_status);
1076 		kfree(wl->raw_fw_status);
1077 		kfree(wl->tx_res_if);
1078 	}
1079 
1080 out:
1081 	return ret;
1082 }
1083 
1084 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1085 {
1086 	int retries = WL1271_BOOT_RETRIES;
1087 	struct wiphy *wiphy = wl->hw->wiphy;
1088 
1089 	static const char* const PLT_MODE[] = {
1090 		"PLT_OFF",
1091 		"PLT_ON",
1092 		"PLT_FEM_DETECT",
1093 		"PLT_CHIP_AWAKE"
1094 	};
1095 
1096 	int ret;
1097 
1098 	mutex_lock(&wl->mutex);
1099 
1100 	wl1271_notice("power up");
1101 
1102 	if (wl->state != WLCORE_STATE_OFF) {
1103 		wl1271_error("cannot go into PLT state because not "
1104 			     "in off state: %d", wl->state);
1105 		ret = -EBUSY;
1106 		goto out;
1107 	}
1108 
1109 	/* Indicate to lower levels that we are now in PLT mode */
1110 	wl->plt = true;
1111 	wl->plt_mode = plt_mode;
1112 
1113 	while (retries) {
1114 		retries--;
1115 		ret = wl12xx_chip_wakeup(wl, true);
1116 		if (ret < 0)
1117 			goto power_off;
1118 
1119 		if (plt_mode != PLT_CHIP_AWAKE) {
1120 			ret = wl->ops->plt_init(wl);
1121 			if (ret < 0)
1122 				goto power_off;
1123 		}
1124 
1125 		wl->state = WLCORE_STATE_ON;
1126 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1127 			      PLT_MODE[plt_mode],
1128 			      wl->chip.fw_ver_str);
1129 
1130 		/* update hw/fw version info in wiphy struct */
1131 		wiphy->hw_version = wl->chip.id;
1132 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1133 			sizeof(wiphy->fw_version));
1134 
1135 		goto out;
1136 
1137 power_off:
1138 		wl1271_power_off(wl);
1139 	}
1140 
1141 	wl->plt = false;
1142 	wl->plt_mode = PLT_OFF;
1143 
1144 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1145 		     WL1271_BOOT_RETRIES);
1146 out:
1147 	mutex_unlock(&wl->mutex);
1148 
1149 	return ret;
1150 }
1151 
1152 int wl1271_plt_stop(struct wl1271 *wl)
1153 {
1154 	int ret = 0;
1155 
1156 	wl1271_notice("power down");
1157 
1158 	/*
1159 	 * Interrupts must be disabled before setting the state to OFF.
1160 	 * Otherwise, the interrupt handler might be called and exit without
1161 	 * reading the interrupt status.
1162 	 */
1163 	wlcore_disable_interrupts(wl);
1164 	mutex_lock(&wl->mutex);
1165 	if (!wl->plt) {
1166 		mutex_unlock(&wl->mutex);
1167 
1168 		/*
1169 		 * This will not necessarily enable interrupts as interrupts
1170 		 * may have been disabled when op_stop was called. It will,
1171 		 * however, balance the above call to disable_interrupts().
1172 		 */
1173 		wlcore_enable_interrupts(wl);
1174 
1175 		wl1271_error("cannot power down because not in PLT "
1176 			     "state: %d", wl->state);
1177 		ret = -EBUSY;
1178 		goto out;
1179 	}
1180 
1181 	mutex_unlock(&wl->mutex);
1182 
1183 	wl1271_flush_deferred_work(wl);
1184 	cancel_work_sync(&wl->netstack_work);
1185 	cancel_work_sync(&wl->recovery_work);
1186 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1187 
1188 	mutex_lock(&wl->mutex);
1189 	wl1271_power_off(wl);
1190 	wl->flags = 0;
1191 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1192 	wl->state = WLCORE_STATE_OFF;
1193 	wl->plt = false;
1194 	wl->plt_mode = PLT_OFF;
1195 	wl->rx_counter = 0;
1196 	mutex_unlock(&wl->mutex);
1197 
1198 out:
1199 	return ret;
1200 }
1201 
1202 static void wl1271_op_tx(struct ieee80211_hw *hw,
1203 			 struct ieee80211_tx_control *control,
1204 			 struct sk_buff *skb)
1205 {
1206 	struct wl1271 *wl = hw->priv;
1207 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1208 	struct ieee80211_vif *vif = info->control.vif;
1209 	struct wl12xx_vif *wlvif = NULL;
1210 	unsigned long flags;
1211 	int q, mapping;
1212 	u8 hlid;
1213 
1214 	if (!vif) {
1215 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1216 		ieee80211_free_txskb(hw, skb);
1217 		return;
1218 	}
1219 
1220 	wlvif = wl12xx_vif_to_data(vif);
1221 	mapping = skb_get_queue_mapping(skb);
1222 	q = wl1271_tx_get_queue(mapping);
1223 
1224 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1225 
1226 	spin_lock_irqsave(&wl->wl_lock, flags);
1227 
1228 	/*
1229 	 * drop the packet if the link is invalid or the queue is stopped
1230 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1231 	 * allow these packets through.
1232 	 */
1233 	if (hlid == WL12XX_INVALID_LINK_ID ||
1234 	    (!test_bit(hlid, wlvif->links_map)) ||
1235 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1236 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1237 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1238 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1239 		ieee80211_free_txskb(hw, skb);
1240 		goto out;
1241 	}
1242 
1243 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1244 		     hlid, q, skb->len);
1245 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1246 
1247 	wl->tx_queue_count[q]++;
1248 	wlvif->tx_queue_count[q]++;
1249 
1250 	/*
1251 	 * The workqueue is slow to process the tx_queue and we need to stop
1252 	 * the queue here, otherwise the queue will get too long.
1253 	 */
1254 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1255 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1256 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1257 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1258 		wlcore_stop_queue_locked(wl, wlvif, q,
1259 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1260 	}
1261 
1262 	/*
1263 	 * The chip specific setup must run before the first TX packet -
1264 	 * before that, the tx_work will not be initialized!
1265 	 */
1266 
1267 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1268 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1269 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1270 
1271 out:
1272 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1273 }
1274 
1275 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1276 {
1277 	unsigned long flags;
1278 	int q;
1279 
1280 	/* no need to queue a new dummy packet if one is already pending */
1281 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1282 		return 0;
1283 
1284 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1285 
1286 	spin_lock_irqsave(&wl->wl_lock, flags);
1287 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1288 	wl->tx_queue_count[q]++;
1289 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1290 
1291 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1292 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1293 		return wlcore_tx_work_locked(wl);
1294 
1295 	/*
1296 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1297 	 * interrupt handler function
1298 	 */
1299 	return 0;
1300 }
1301 
1302 /*
1303  * The size of the dummy packet should be at least 1400 bytes. However, in
1304  * order to minimize the number of bus transactions, aligning it to 512-byte
1305  * boundaries could be beneficial, performance-wise.
1306  */
1307 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1308 
1309 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1310 {
1311 	struct sk_buff *skb;
1312 	struct ieee80211_hdr_3addr *hdr;
1313 	unsigned int dummy_packet_size;
1314 
1315 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1316 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1317 
1318 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1319 	if (!skb) {
1320 		wl1271_warning("Failed to allocate a dummy packet skb");
1321 		return NULL;
1322 	}
1323 
1324 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1325 
1326 	hdr = skb_put_zero(skb, sizeof(*hdr));
1327 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1328 					 IEEE80211_STYPE_NULLFUNC |
1329 					 IEEE80211_FCTL_TODS);
1330 
1331 	skb_put_zero(skb, dummy_packet_size);
1332 
1333 	/* Dummy packets require the TID to be management */
1334 	skb->priority = WL1271_TID_MGMT;
1335 
1336 	/* Initialize all fields that might be used */
1337 	skb_set_queue_mapping(skb, 0);
1338 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1339 
1340 	return skb;
1341 }
1342 
1343 
1344 static int
1345 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1346 {
1347 	int num_fields = 0, in_field = 0, fields_size = 0;
1348 	int i, pattern_len = 0;
1349 
1350 	if (!p->mask) {
1351 		wl1271_warning("No mask in WoWLAN pattern");
1352 		return -EINVAL;
1353 	}
1354 
1355 	/*
1356 	 * The pattern is broken up into segments of bytes at different offsets
1357 	 * that need to be checked by the FW filter. Each segment is called
1358 	 * a field in the FW API. We verify that the total number of fields
1359 	 * required for this pattern won't exceed FW limits (8)
1360 	 * as well as the total fields buffer won't exceed the FW limit.
1361 	 * Note that if there's a pattern which crosses Ethernet/IP header
1362 	 * boundary a new field is required.
1363 	 */
1364 	for (i = 0; i < p->pattern_len; i++) {
1365 		if (test_bit(i, (unsigned long *)p->mask)) {
1366 			if (!in_field) {
1367 				in_field = 1;
1368 				pattern_len = 1;
1369 			} else {
1370 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1371 					num_fields++;
1372 					fields_size += pattern_len +
1373 						RX_FILTER_FIELD_OVERHEAD;
1374 					pattern_len = 1;
1375 				} else
1376 					pattern_len++;
1377 			}
1378 		} else {
1379 			if (in_field) {
1380 				in_field = 0;
1381 				fields_size += pattern_len +
1382 					RX_FILTER_FIELD_OVERHEAD;
1383 				num_fields++;
1384 			}
1385 		}
1386 	}
1387 
1388 	if (in_field) {
1389 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1390 		num_fields++;
1391 	}
1392 
1393 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1394 		wl1271_warning("RX Filter too complex. Too many segments");
1395 		return -EINVAL;
1396 	}
1397 
1398 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1399 		wl1271_warning("RX filter pattern is too big");
1400 		return -E2BIG;
1401 	}
1402 
1403 	return 0;
1404 }
1405 
1406 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1407 {
1408 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1409 }
1410 
1411 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1412 {
1413 	int i;
1414 
1415 	if (filter == NULL)
1416 		return;
1417 
1418 	for (i = 0; i < filter->num_fields; i++)
1419 		kfree(filter->fields[i].pattern);
1420 
1421 	kfree(filter);
1422 }
1423 
1424 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1425 				 u16 offset, u8 flags,
1426 				 const u8 *pattern, u8 len)
1427 {
1428 	struct wl12xx_rx_filter_field *field;
1429 
1430 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1431 		wl1271_warning("Max fields per RX filter. can't alloc another");
1432 		return -EINVAL;
1433 	}
1434 
1435 	field = &filter->fields[filter->num_fields];
1436 
1437 	field->pattern = kzalloc(len, GFP_KERNEL);
1438 	if (!field->pattern) {
1439 		wl1271_warning("Failed to allocate RX filter pattern");
1440 		return -ENOMEM;
1441 	}
1442 
1443 	filter->num_fields++;
1444 
1445 	field->offset = cpu_to_le16(offset);
1446 	field->flags = flags;
1447 	field->len = len;
1448 	memcpy(field->pattern, pattern, len);
1449 
1450 	return 0;
1451 }
1452 
1453 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1454 {
1455 	int i, fields_size = 0;
1456 
1457 	for (i = 0; i < filter->num_fields; i++)
1458 		fields_size += filter->fields[i].len +
1459 			sizeof(struct wl12xx_rx_filter_field) -
1460 			sizeof(u8 *);
1461 
1462 	return fields_size;
1463 }
1464 
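/*
 * Serialize the filter fields into the flat buffer expected by the FW:
 * each entry carries offset, flags and length followed by the pattern
 * bytes, with the pattern pointer itself omitted.
 */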
1465 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1466 				    u8 *buf)
1467 {
1468 	int i;
1469 	struct wl12xx_rx_filter_field *field;
1470 
1471 	for (i = 0; i < filter->num_fields; i++) {
1472 		field = (struct wl12xx_rx_filter_field *)buf;
1473 
1474 		field->offset = filter->fields[i].offset;
1475 		field->flags = filter->fields[i].flags;
1476 		field->len = filter->fields[i].len;
1477 
1478 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1479 		buf += sizeof(struct wl12xx_rx_filter_field) -
1480 			sizeof(u8 *) + field->len;
1481 	}
1482 }
1483 
1484 /*
1485  * Allocates an RX filter returned through f
1486  * which needs to be freed using rx_filter_free()
1487  */
1488 static int
1489 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1490 					   struct wl12xx_rx_filter **f)
1491 {
1492 	int i, j, ret = 0;
1493 	struct wl12xx_rx_filter *filter;
1494 	u16 offset;
1495 	u8 flags, len;
1496 
1497 	filter = wl1271_rx_filter_alloc();
1498 	if (!filter) {
1499 		wl1271_warning("Failed to alloc rx filter");
1500 		ret = -ENOMEM;
1501 		goto err;
1502 	}
1503 
1504 	i = 0;
1505 	while (i < p->pattern_len) {
1506 		if (!test_bit(i, (unsigned long *)p->mask)) {
1507 			i++;
1508 			continue;
1509 		}
1510 
1511 		for (j = i; j < p->pattern_len; j++) {
1512 			if (!test_bit(j, (unsigned long *)p->mask))
1513 				break;
1514 
1515 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1516 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1517 				break;
1518 		}
1519 
1520 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1521 			offset = i;
1522 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1523 		} else {
1524 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1525 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1526 		}
1527 
1528 		len = j - i;
1529 
1530 		ret = wl1271_rx_filter_alloc_field(filter,
1531 						   offset,
1532 						   flags,
1533 						   &p->pattern[i], len);
1534 		if (ret)
1535 			goto err;
1536 
1537 		i = j;
1538 	}
1539 
1540 	filter->action = FILTER_SIGNAL;
1541 
1542 	*f = filter;
1543 	return 0;
1544 
1545 err:
1546 	wl1271_rx_filter_free(filter);
1547 	*f = NULL;
1548 
1549 	return ret;
1550 }
1551 
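/*
 * Program the FW RX filters from the WoWLAN patterns. Without patterns
 * everything is passed to the host; otherwise each pattern becomes a
 * filter that signals a wakeup while unmatched frames are dropped.
 */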
1552 static int wl1271_configure_wowlan(struct wl1271 *wl,
1553 				   struct cfg80211_wowlan *wow)
1554 {
1555 	int i, ret;
1556 
1557 	if (!wow || wow->any || !wow->n_patterns) {
1558 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1559 							  FILTER_SIGNAL);
1560 		if (ret)
1561 			goto out;
1562 
1563 		ret = wl1271_rx_filter_clear_all(wl);
1564 		if (ret)
1565 			goto out;
1566 
1567 		return 0;
1568 	}
1569 
1570 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1571 		return -EINVAL;
1572 
1573 	/* Validate all incoming patterns before clearing current FW state */
1574 	for (i = 0; i < wow->n_patterns; i++) {
1575 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1576 		if (ret) {
1577 			wl1271_warning("Bad wowlan pattern %d", i);
1578 			return ret;
1579 		}
1580 	}
1581 
1582 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1583 	if (ret)
1584 		goto out;
1585 
1586 	ret = wl1271_rx_filter_clear_all(wl);
1587 	if (ret)
1588 		goto out;
1589 
1590 	/* Translate WoWLAN patterns into filters */
1591 	for (i = 0; i < wow->n_patterns; i++) {
1592 		struct cfg80211_pkt_pattern *p;
1593 		struct wl12xx_rx_filter *filter = NULL;
1594 
1595 		p = &wow->patterns[i];
1596 
1597 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1598 		if (ret) {
1599 			wl1271_warning("Failed to create an RX filter from "
1600 				       "wowlan pattern %d", i);
1601 			goto out;
1602 		}
1603 
1604 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1605 
1606 		wl1271_rx_filter_free(filter);
1607 		if (ret)
1608 			goto out;
1609 	}
1610 
1611 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1612 
1613 out:
1614 	return ret;
1615 }
1616 
1617 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1618 					struct wl12xx_vif *wlvif,
1619 					struct cfg80211_wowlan *wow)
1620 {
1621 	int ret = 0;
1622 
1623 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1624 		goto out;
1625 
1626 	ret = wl1271_configure_wowlan(wl, wow);
1627 	if (ret < 0)
1628 		goto out;
1629 
1630 	if ((wl->conf.conn.suspend_wake_up_event ==
1631 	     wl->conf.conn.wake_up_event) &&
1632 	    (wl->conf.conn.suspend_listen_interval ==
1633 	     wl->conf.conn.listen_interval))
1634 		goto out;
1635 
1636 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1637 				    wl->conf.conn.suspend_wake_up_event,
1638 				    wl->conf.conn.suspend_listen_interval);
1639 
1640 	if (ret < 0)
1641 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1642 out:
1643 	return ret;
1644 
1645 }
1646 
1647 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1648 					struct wl12xx_vif *wlvif,
1649 					struct cfg80211_wowlan *wow)
1650 {
1651 	int ret = 0;
1652 
1653 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1654 		goto out;
1655 
1656 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1657 	if (ret < 0)
1658 		goto out;
1659 
1660 	ret = wl1271_configure_wowlan(wl, wow);
1661 	if (ret < 0)
1662 		goto out;
1663 
1664 out:
1665 	return ret;
1666 
1667 }
1668 
1669 static int wl1271_configure_suspend(struct wl1271 *wl,
1670 				    struct wl12xx_vif *wlvif,
1671 				    struct cfg80211_wowlan *wow)
1672 {
1673 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1674 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1675 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1676 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1677 	return 0;
1678 }
1679 
1680 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1681 {
1682 	int ret = 0;
1683 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1684 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1685 
1686 	if ((!is_ap) && (!is_sta))
1687 		return;
1688 
1689 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1690 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1691 		return;
1692 
1693 	wl1271_configure_wowlan(wl, NULL);
1694 
1695 	if (is_sta) {
1696 		if ((wl->conf.conn.suspend_wake_up_event ==
1697 		     wl->conf.conn.wake_up_event) &&
1698 		    (wl->conf.conn.suspend_listen_interval ==
1699 		     wl->conf.conn.listen_interval))
1700 			return;
1701 
1702 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1703 				    wl->conf.conn.wake_up_event,
1704 				    wl->conf.conn.listen_interval);
1705 
1706 		if (ret < 0)
1707 			wl1271_error("resume: wake up conditions failed: %d",
1708 				     ret);
1709 
1710 	} else if (is_ap) {
1711 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1712 	}
1713 }
1714 
1715 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1716 					    struct cfg80211_wowlan *wow)
1717 {
1718 	struct wl1271 *wl = hw->priv;
1719 	struct wl12xx_vif *wlvif;
1720 	unsigned long flags;
1721 	int ret;
1722 
1723 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1724 	WARN_ON(!wow);
1725 
1726 	/* we want to perform the recovery before suspending */
1727 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1728 		wl1271_warning("postponing suspend to perform recovery");
1729 		return -EBUSY;
1730 	}
1731 
1732 	wl1271_tx_flush(wl);
1733 
1734 	mutex_lock(&wl->mutex);
1735 
1736 	ret = pm_runtime_get_sync(wl->dev);
1737 	if (ret < 0) {
1738 		pm_runtime_put_noidle(wl->dev);
1739 		mutex_unlock(&wl->mutex);
1740 		return ret;
1741 	}
1742 
1743 	wl->wow_enabled = true;
1744 	wl12xx_for_each_wlvif(wl, wlvif) {
1745 		if (wlcore_is_p2p_mgmt(wlvif))
1746 			continue;
1747 
1748 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1749 		if (ret < 0) {
1750 			mutex_unlock(&wl->mutex);
1751 			wl1271_warning("couldn't prepare device to suspend");
1752 			return ret;
1753 		}
1754 	}
1755 
1756 	/* disable fast link flow control notifications from FW */
1757 	ret = wlcore_hw_interrupt_notify(wl, false);
1758 	if (ret < 0)
1759 		goto out_sleep;
1760 
1761 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1762 	ret = wlcore_hw_rx_ba_filter(wl,
1763 				     !!wl->conf.conn.suspend_rx_ba_activity);
1764 	if (ret < 0)
1765 		goto out_sleep;
1766 
1767 out_sleep:
1768 	pm_runtime_put_noidle(wl->dev);
1769 	mutex_unlock(&wl->mutex);
1770 
1771 	if (ret < 0) {
1772 		wl1271_warning("couldn't prepare device to suspend");
1773 		return ret;
1774 	}
1775 
1776 	/* flush any remaining work */
1777 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1778 
1779 	flush_work(&wl->tx_work);
1780 
1781 	/*
1782 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1783 	 * it on resume anyway.
1784 	 */
1785 	cancel_delayed_work(&wl->tx_watchdog_work);
1786 
1787 	/*
1788 	 * set suspended flag to avoid triggering a new threaded_irq
1789 	 * work.
1790 	 */
1791 	spin_lock_irqsave(&wl->wl_lock, flags);
1792 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1793 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1794 
1795 	return pm_runtime_force_suspend(wl->dev);
1796 }
1797 
1798 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1799 {
1800 	struct wl1271 *wl = hw->priv;
1801 	struct wl12xx_vif *wlvif;
1802 	unsigned long flags;
1803 	bool run_irq_work = false, pending_recovery;
1804 	int ret;
1805 
1806 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1807 		     wl->wow_enabled);
1808 	WARN_ON(!wl->wow_enabled);
1809 
1810 	ret = pm_runtime_force_resume(wl->dev);
1811 	if (ret < 0) {
1812 		wl1271_error("ELP wakeup failure!");
1813 		goto out_sleep;
1814 	}
1815 
1816 	/*
1817 	 * re-enable irq_work enqueuing, and call irq_work directly if
1818 	 * there is a pending work.
1819 	 */
1820 	spin_lock_irqsave(&wl->wl_lock, flags);
1821 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1822 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1823 		run_irq_work = true;
1824 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1825 
1826 	mutex_lock(&wl->mutex);
1827 
1828 	/* test the recovery flag before calling any SDIO functions */
1829 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1830 				    &wl->flags);
1831 
1832 	if (run_irq_work) {
1833 		wl1271_debug(DEBUG_MAC80211,
1834 			     "run postponed irq_work directly");
1835 
1836 		/* don't talk to the HW if recovery is pending */
1837 		if (!pending_recovery) {
1838 			ret = wlcore_irq_locked(wl);
1839 			if (ret)
1840 				wl12xx_queue_recovery_work(wl);
1841 		}
1842 
1843 		wlcore_enable_interrupts(wl);
1844 	}
1845 
1846 	if (pending_recovery) {
1847 		wl1271_warning("queuing forgotten recovery on resume");
1848 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1849 		goto out_sleep;
1850 	}
1851 
1852 	ret = pm_runtime_get_sync(wl->dev);
1853 	if (ret < 0) {
1854 		pm_runtime_put_noidle(wl->dev);
1855 		goto out;
1856 	}
1857 
1858 	wl12xx_for_each_wlvif(wl, wlvif) {
1859 		if (wlcore_is_p2p_mgmt(wlvif))
1860 			continue;
1861 
1862 		wl1271_configure_resume(wl, wlvif);
1863 	}
1864 
1865 	ret = wlcore_hw_interrupt_notify(wl, true);
1866 	if (ret < 0)
1867 		goto out_sleep;
1868 
1869 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1870 	ret = wlcore_hw_rx_ba_filter(wl, false);
1871 	if (ret < 0)
1872 		goto out_sleep;
1873 
1874 out_sleep:
1875 	pm_runtime_mark_last_busy(wl->dev);
1876 	pm_runtime_put_autosuspend(wl->dev);
1877 
1878 out:
1879 	wl->wow_enabled = false;
1880 
1881 	/*
1882 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1883 	 * That way we avoid possible conditions where Tx-complete interrupts
1884 	 * fail to arrive and we perform a spurious recovery.
1885 	 */
1886 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1887 	mutex_unlock(&wl->mutex);
1888 
1889 	return 0;
1890 }
1891 
1892 static int wl1271_op_start(struct ieee80211_hw *hw)
1893 {
1894 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1895 
1896 	/*
1897 	 * We have to delay the booting of the hardware because
1898 	 * we need to know the local MAC address before downloading and
1899 	 * initializing the firmware. The MAC address cannot be changed
1900 	 * after boot, and without the proper MAC address, the firmware
1901 	 * will not function properly.
1902 	 *
1903 	 * The MAC address is first known when the corresponding interface
1904 	 * is added. That is where we will initialize the hardware.
1905 	 */
1906 
1907 	return 0;
1908 }
1909 
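/*
 * Bring the device down. Called with wl->mutex held; the mutex is
 * dropped temporarily so interrupts can be synchronized and pending
 * works cancelled, then all per-device state is reset to its "off"
 * defaults and the fw status buffers are freed.
 */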
1910 static void wlcore_op_stop_locked(struct wl1271 *wl)
1911 {
1912 	int i;
1913 
1914 	if (wl->state == WLCORE_STATE_OFF) {
1915 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1916 					&wl->flags))
1917 			wlcore_enable_interrupts(wl);
1918 
1919 		return;
1920 	}
1921 
1922 	/*
1923 	 * this must be before the cancel_work calls below, so that the work
1924 	 * functions don't perform further work.
1925 	 */
1926 	wl->state = WLCORE_STATE_OFF;
1927 
1928 	/*
1929 	 * Use the nosync variant to disable interrupts, so the mutex can be
1930 	 * held while doing so without deadlocking.
1931 	 */
1932 	wlcore_disable_interrupts_nosync(wl);
1933 
1934 	mutex_unlock(&wl->mutex);
1935 
1936 	wlcore_synchronize_interrupts(wl);
1937 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1938 		cancel_work_sync(&wl->recovery_work);
1939 	wl1271_flush_deferred_work(wl);
1940 	cancel_delayed_work_sync(&wl->scan_complete_work);
1941 	cancel_work_sync(&wl->netstack_work);
1942 	cancel_work_sync(&wl->tx_work);
1943 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1944 
1945 	/* let's notify MAC80211 about the remaining pending TX frames */
1946 	mutex_lock(&wl->mutex);
1947 	wl12xx_tx_reset(wl);
1948 
1949 	wl1271_power_off(wl);
1950 	/*
1951 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1952 	 * an interrupt storm. Now that the power is down, it is safe to
1953 	 * re-enable interrupts to balance the disable depth
1954 	 */
1955 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1956 		wlcore_enable_interrupts(wl);
1957 
1958 	wl->band = NL80211_BAND_2GHZ;
1959 
1960 	wl->rx_counter = 0;
1961 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1962 	wl->channel_type = NL80211_CHAN_NO_HT;
1963 	wl->tx_blocks_available = 0;
1964 	wl->tx_allocated_blocks = 0;
1965 	wl->tx_results_count = 0;
1966 	wl->tx_packets_count = 0;
1967 	wl->time_offset = 0;
1968 	wl->ap_fw_ps_map = 0;
1969 	wl->ap_ps_map = 0;
1970 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1971 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1972 	memset(wl->links_map, 0, sizeof(wl->links_map));
1973 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1974 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1975 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1976 	wl->active_sta_count = 0;
1977 	wl->active_link_count = 0;
1978 
1979 	/* The system link is always allocated */
1980 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1981 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1982 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1983 
1984 	/*
1985 	 * this is performed after the cancel_work calls and the associated
1986 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1987 	 * get executed before all these vars have been reset.
1988 	 */
1989 	wl->flags = 0;
1990 
1991 	wl->tx_blocks_freed = 0;
1992 
1993 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1994 		wl->tx_pkts_freed[i] = 0;
1995 		wl->tx_allocated_pkts[i] = 0;
1996 	}
1997 
1998 	wl1271_debugfs_reset(wl);
1999 
2000 	kfree(wl->raw_fw_status);
2001 	wl->raw_fw_status = NULL;
2002 	kfree(wl->fw_status);
2003 	wl->fw_status = NULL;
2004 	kfree(wl->tx_res_if);
2005 	wl->tx_res_if = NULL;
2006 	kfree(wl->target_mem_map);
2007 	wl->target_mem_map = NULL;
2008 
2009 	/*
2010 	 * FW channels must be re-calibrated after recovery;
2011 	 * save the current Reg-Domain channel configuration and clear it.
2012 	 */
2013 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2014 	       sizeof(wl->reg_ch_conf_pending));
2015 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2016 }
2017 
2018 static void wlcore_op_stop(struct ieee80211_hw *hw)
2019 {
2020 	struct wl1271 *wl = hw->priv;
2021 
2022 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2023 
2024 	mutex_lock(&wl->mutex);
2025 
2026 	wlcore_op_stop_locked(wl);
2027 
2028 	mutex_unlock(&wl->mutex);
2029 }
2030 
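/*
 * Delayed work that fires when a firmware channel switch did not
 * complete in time: report the failed switch to mac80211 and ask the
 * firmware to stop it.
 */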
2031 static void wlcore_channel_switch_work(struct work_struct *work)
2032 {
2033 	struct delayed_work *dwork;
2034 	struct wl1271 *wl;
2035 	struct ieee80211_vif *vif;
2036 	struct wl12xx_vif *wlvif;
2037 	int ret;
2038 
2039 	dwork = to_delayed_work(work);
2040 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2041 	wl = wlvif->wl;
2042 
2043 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2044 
2045 	mutex_lock(&wl->mutex);
2046 
2047 	if (unlikely(wl->state != WLCORE_STATE_ON))
2048 		goto out;
2049 
2050 	/* check the channel switch is still ongoing */
2051 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2052 		goto out;
2053 
2054 	vif = wl12xx_wlvif_to_vif(wlvif);
2055 	ieee80211_chswitch_done(vif, false);
2056 
2057 	ret = pm_runtime_get_sync(wl->dev);
2058 	if (ret < 0) {
2059 		pm_runtime_put_noidle(wl->dev);
2060 		goto out;
2061 	}
2062 
2063 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2064 
2065 	pm_runtime_mark_last_busy(wl->dev);
2066 	pm_runtime_put_autosuspend(wl->dev);
2067 out:
2068 	mutex_unlock(&wl->mutex);
2069 }
2070 
2071 static void wlcore_connection_loss_work(struct work_struct *work)
2072 {
2073 	struct delayed_work *dwork;
2074 	struct wl1271 *wl;
2075 	struct ieee80211_vif *vif;
2076 	struct wl12xx_vif *wlvif;
2077 
2078 	dwork = to_delayed_work(work);
2079 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2080 	wl = wlvif->wl;
2081 
2082 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2083 
2084 	mutex_lock(&wl->mutex);
2085 
2086 	if (unlikely(wl->state != WLCORE_STATE_ON))
2087 		goto out;
2088 
2089 	/* Call mac80211 connection loss */
2090 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2091 		goto out;
2092 
2093 	vif = wl12xx_wlvif_to_vif(wlvif);
2094 	ieee80211_connection_loss(vif);
2095 out:
2096 	mutex_unlock(&wl->mutex);
2097 }
2098 
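/*
 * Delayed work armed when an auth reply is sent: once
 * WLCORE_PEND_AUTH_ROC_TIMEOUT has really elapsed since the last reply
 * (re-checked here in case a newer one arrived while we were waiting
 * for the mutex), cancel the ROC kept for the exchange.
 */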
2099 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2100 {
2101 	struct delayed_work *dwork;
2102 	struct wl1271 *wl;
2103 	struct wl12xx_vif *wlvif;
2104 	unsigned long time_spare;
2105 	int ret;
2106 
2107 	dwork = to_delayed_work(work);
2108 	wlvif = container_of(dwork, struct wl12xx_vif,
2109 			     pending_auth_complete_work);
2110 	wl = wlvif->wl;
2111 
2112 	mutex_lock(&wl->mutex);
2113 
2114 	if (unlikely(wl->state != WLCORE_STATE_ON))
2115 		goto out;
2116 
2117 	/*
2118 	 * Make sure a second really passed since the last auth reply. Maybe
2119 	 * a second auth reply arrived while we were stuck on the mutex.
2120 	 * Check for a little less than the timeout to protect from scheduler
2121 	 * irregularities.
2122 	 */
2123 	time_spare = jiffies +
2124 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2125 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2126 		goto out;
2127 
2128 	ret = pm_runtime_get_sync(wl->dev);
2129 	if (ret < 0) {
2130 		pm_runtime_put_noidle(wl->dev);
2131 		goto out;
2132 	}
2133 
2134 	/* cancel the ROC if active */
2135 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2136 
2137 	pm_runtime_mark_last_busy(wl->dev);
2138 	pm_runtime_put_autosuspend(wl->dev);
2139 out:
2140 	mutex_unlock(&wl->mutex);
2141 }
2142 
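/*
 * Simple bitmap allocators for the limited pools of fw rate-policy
 * indices and keep-alive (KLV) template ids. Allocation returns -EBUSY
 * when the pool is exhausted; freeing resets the index to the pool
 * size so stale ids are easy to spot.
 */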
2143 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2144 {
2145 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2146 					WL12XX_MAX_RATE_POLICIES);
2147 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2148 		return -EBUSY;
2149 
2150 	__set_bit(policy, wl->rate_policies_map);
2151 	*idx = policy;
2152 	return 0;
2153 }
2154 
2155 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2156 {
2157 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2158 		return;
2159 
2160 	__clear_bit(*idx, wl->rate_policies_map);
2161 	*idx = WL12XX_MAX_RATE_POLICIES;
2162 }
2163 
2164 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2165 {
2166 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2167 					WLCORE_MAX_KLV_TEMPLATES);
2168 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2169 		return -EBUSY;
2170 
2171 	__set_bit(policy, wl->klv_templates_map);
2172 	*idx = policy;
2173 	return 0;
2174 }
2175 
2176 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2177 {
2178 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2179 		return;
2180 
2181 	__clear_bit(*idx, wl->klv_templates_map);
2182 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2183 }
2184 
2185 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2186 {
2187 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2188 
2189 	switch (wlvif->bss_type) {
2190 	case BSS_TYPE_AP_BSS:
2191 		if (wlvif->p2p)
2192 			return WL1271_ROLE_P2P_GO;
2193 		else if (ieee80211_vif_is_mesh(vif))
2194 			return WL1271_ROLE_MESH_POINT;
2195 		else
2196 			return WL1271_ROLE_AP;
2197 
2198 	case BSS_TYPE_STA_BSS:
2199 		if (wlvif->p2p)
2200 			return WL1271_ROLE_P2P_CL;
2201 		else
2202 			return WL1271_ROLE_STA;
2203 
2204 	case BSS_TYPE_IBSS:
2205 		return WL1271_ROLE_IBSS;
2206 
2207 	default:
2208 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2209 	}
2210 	return WL12XX_INVALID_ROLE_TYPE;
2211 }
2212 
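/*
 * Reset the per-vif state (everything up to the persistent fields),
 * map the mac80211 interface type to an internal bss_type, and
 * allocate the rate policies and keep-alive template the role needs.
 * Band, channel and power level are copied from wl, since mac80211
 * configures them globally while we track them per interface.
 */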
2213 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2214 {
2215 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2216 	int i;
2217 
2218 	/* clear everything but the persistent data */
2219 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2220 
2221 	switch (ieee80211_vif_type_p2p(vif)) {
2222 	case NL80211_IFTYPE_P2P_CLIENT:
2223 		wlvif->p2p = 1;
2224 		/* fall-through */
2225 	case NL80211_IFTYPE_STATION:
2226 	case NL80211_IFTYPE_P2P_DEVICE:
2227 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2228 		break;
2229 	case NL80211_IFTYPE_ADHOC:
2230 		wlvif->bss_type = BSS_TYPE_IBSS;
2231 		break;
2232 	case NL80211_IFTYPE_P2P_GO:
2233 		wlvif->p2p = 1;
2234 		/* fall-through */
2235 	case NL80211_IFTYPE_AP:
2236 	case NL80211_IFTYPE_MESH_POINT:
2237 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2238 		break;
2239 	default:
2240 		wlvif->bss_type = MAX_BSS_TYPE;
2241 		return -EOPNOTSUPP;
2242 	}
2243 
2244 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2245 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2246 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2247 
2248 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2249 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2250 		/* init sta/ibss data */
2251 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2252 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2253 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2254 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2255 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2256 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2257 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2258 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2259 	} else {
2260 		/* init ap data */
2261 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2262 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2263 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2264 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2265 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2266 			wl12xx_allocate_rate_policy(wl,
2267 						&wlvif->ap.ucast_rate_idx[i]);
2268 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2269 		/*
2270 		 * TODO: check if basic_rate shouldn't be
2271 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2272 		 * instead (the same thing for STA above).
2273 		 */
2274 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2275 		/* TODO: this seems to be used only for STA, check it */
2276 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2277 	}
2278 
2279 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2280 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2281 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2282 
2283 	/*
2284 	 * mac80211 configures some values globally, while we treat them
2285 	 * per-interface. thus, on init, we have to copy them from wl
2286 	 */
2287 	wlvif->band = wl->band;
2288 	wlvif->channel = wl->channel;
2289 	wlvif->power_level = wl->power_level;
2290 	wlvif->channel_type = wl->channel_type;
2291 
2292 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2293 		  wl1271_rx_streaming_enable_work);
2294 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2295 		  wl1271_rx_streaming_disable_work);
2296 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2297 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2298 			  wlcore_channel_switch_work);
2299 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2300 			  wlcore_connection_loss_work);
2301 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2302 			  wlcore_pending_auth_complete_work);
2303 	INIT_LIST_HEAD(&wlvif->list);
2304 
2305 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2306 	return 0;
2307 }
2308 
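/*
 * Power the chip on and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times. On success the hw/fw versions are
 * published to wiphy, 5GHz channels are disabled if the NVS says 11a
 * is unsupported, and the driver moves to WLCORE_STATE_ON. Called
 * with wl->mutex held (temporarily dropped on the retry path).
 */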
2309 static int wl12xx_init_fw(struct wl1271 *wl)
2310 {
2311 	int retries = WL1271_BOOT_RETRIES;
2312 	bool booted = false;
2313 	struct wiphy *wiphy = wl->hw->wiphy;
2314 	int ret;
2315 
2316 	while (retries) {
2317 		retries--;
2318 		ret = wl12xx_chip_wakeup(wl, false);
2319 		if (ret < 0)
2320 			goto power_off;
2321 
2322 		ret = wl->ops->boot(wl);
2323 		if (ret < 0)
2324 			goto power_off;
2325 
2326 		ret = wl1271_hw_init(wl);
2327 		if (ret < 0)
2328 			goto irq_disable;
2329 
2330 		booted = true;
2331 		break;
2332 
2333 irq_disable:
2334 		mutex_unlock(&wl->mutex);
2335 		/* Unlocking the mutex in the middle of handling is
2336 		   inherently unsafe. In this case we deem it safe to do,
2337 		   because we need to let any possibly pending IRQ out of
2338 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2339 		   work function will not do anything.) Also, any other
2340 		   possible concurrent operations will fail due to the
2341 		   current state, hence the wl1271 struct should be safe. */
2342 		wlcore_disable_interrupts(wl);
2343 		wl1271_flush_deferred_work(wl);
2344 		cancel_work_sync(&wl->netstack_work);
2345 		mutex_lock(&wl->mutex);
2346 power_off:
2347 		wl1271_power_off(wl);
2348 	}
2349 
2350 	if (!booted) {
2351 		wl1271_error("firmware boot failed despite %d retries",
2352 			     WL1271_BOOT_RETRIES);
2353 		goto out;
2354 	}
2355 
2356 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2357 
2358 	/* update hw/fw version info in wiphy struct */
2359 	wiphy->hw_version = wl->chip.id;
2360 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2361 		sizeof(wiphy->fw_version));
2362 
2363 	/*
2364 	 * Now we know if 11a is supported (info from the NVS), so disable
2365 	 * 11a channels if not supported
2366 	 */
2367 	if (!wl->enable_11a)
2368 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2369 
2370 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2371 		     wl->enable_11a ? "" : "not ");
2372 
2373 	wl->state = WLCORE_STATE_ON;
2374 out:
2375 	return ret;
2376 }
2377 
2378 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2379 {
2380 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2381 }
2382 
2383 /*
2384  * Check whether a fw switch (i.e. moving from one loaded
2385  * fw to another) is needed. This function is also responsible
2386  * for updating wl->last_vif_count, so it must be called before
2387  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2388  * will be used).
2389  */
2390 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2391 				  struct vif_counter_data vif_counter_data,
2392 				  bool add)
2393 {
2394 	enum wl12xx_fw_type current_fw = wl->fw_type;
2395 	u8 vif_count = vif_counter_data.counter;
2396 
2397 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2398 		return false;
2399 
2400 	/* increase the vif count if this is a new vif */
2401 	if (add && !vif_counter_data.cur_vif_running)
2402 		vif_count++;
2403 
2404 	wl->last_vif_count = vif_count;
2405 
2406 	/* no need for fw change if the device is OFF */
2407 	if (wl->state == WLCORE_STATE_OFF)
2408 		return false;
2409 
2410 	/* no need for fw change if a single fw is used */
2411 	if (!wl->mr_fw_name)
2412 		return false;
2413 
2414 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2415 		return true;
2416 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2417 		return true;
2418 
2419 	return false;
2420 }
2421 
2422 /*
2423  * Enter "forced psm". Make sure the sta is in psm against the ap,
2424  * to make the fw switch a bit more disconnection-persistent.
2425  */
2426 static void wl12xx_force_active_psm(struct wl1271 *wl)
2427 {
2428 	struct wl12xx_vif *wlvif;
2429 
2430 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2431 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2432 	}
2433 }
2434 
2435 struct wlcore_hw_queue_iter_data {
2436 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2437 	/* current vif */
2438 	struct ieee80211_vif *vif;
2439 	/* is the current vif among those iterated */
2440 	bool cur_running;
2441 };
2442 
2443 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2444 				 struct ieee80211_vif *vif)
2445 {
2446 	struct wlcore_hw_queue_iter_data *iter_data = data;
2447 
2448 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2449 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2450 		return;
2451 
2452 	if (iter_data->cur_running || vif == iter_data->vif) {
2453 		iter_data->cur_running = true;
2454 		return;
2455 	}
2456 
2457 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2458 }
2459 
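/*
 * Give the vif a block of NUM_TX_QUEUES consecutive mac80211 hw
 * queues. The iterator above marks blocks already claimed by running
 * interfaces; on resume/recovery the previously assigned base is kept.
 * AP vifs additionally get a per-interface cab (content-after-beacon)
 * queue from the reserved range at the end.
 */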
2460 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2461 					 struct wl12xx_vif *wlvif)
2462 {
2463 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2464 	struct wlcore_hw_queue_iter_data iter_data = {};
2465 	int i, q_base;
2466 
2467 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2468 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2469 		return 0;
2470 	}
2471 
2472 	iter_data.vif = vif;
2473 
2474 	/* mark all bits taken by active interfaces */
2475 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2476 					IEEE80211_IFACE_ITER_RESUME_ALL,
2477 					wlcore_hw_queue_iter, &iter_data);
2478 
2479 	/* the current vif is already running in mac80211 (resume/recovery) */
2480 	if (iter_data.cur_running) {
2481 		wlvif->hw_queue_base = vif->hw_queue[0];
2482 		wl1271_debug(DEBUG_MAC80211,
2483 			     "using pre-allocated hw queue base %d",
2484 			     wlvif->hw_queue_base);
2485 
2486 		/* the interface might have changed type */
2487 		goto adjust_cab_queue;
2488 	}
2489 
2490 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2491 				     WLCORE_NUM_MAC_ADDRESSES);
2492 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2493 		return -EBUSY;
2494 
2495 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2496 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2497 		     wlvif->hw_queue_base);
2498 
2499 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2500 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2501 		/* register hw queues in mac80211 */
2502 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2503 	}
2504 
2505 adjust_cab_queue:
2506 	/* the last places are reserved for cab queues per interface */
2507 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2508 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2509 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2510 	else
2511 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2512 
2513 	return 0;
2514 }
2515 
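/*
 * mac80211 add_interface handler: boots the firmware on first use
 * (the MAC address only becomes known here, see wl1271_op_start()),
 * triggers an intended recovery when a single-role/multi-role fw
 * switch is needed, and otherwise enables the corresponding fw role.
 */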
2516 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2517 				   struct ieee80211_vif *vif)
2518 {
2519 	struct wl1271 *wl = hw->priv;
2520 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2521 	struct vif_counter_data vif_count;
2522 	int ret = 0;
2523 	u8 role_type;
2524 
2525 	if (wl->plt) {
2526 		wl1271_error("Adding Interface not allowed while in PLT mode");
2527 		return -EBUSY;
2528 	}
2529 
2530 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2531 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2532 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2533 
2534 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2535 		     ieee80211_vif_type_p2p(vif), vif->addr);
2536 
2537 	wl12xx_get_vif_count(hw, vif, &vif_count);
2538 
2539 	mutex_lock(&wl->mutex);
2540 
2541 	/*
2542 	 * in some rare HW recovery corner cases it's possible to
2543 	 * get here before __wl1271_op_remove_interface is complete, so
2544 	 * opt out if that is the case.
2545 	 */
2546 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2547 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2548 		ret = -EBUSY;
2549 		goto out;
2550 	}
2551 
2552 
2553 	ret = wl12xx_init_vif_data(wl, vif);
2554 	if (ret < 0)
2555 		goto out;
2556 
2557 	wlvif->wl = wl;
2558 	role_type = wl12xx_get_role_type(wl, wlvif);
2559 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2560 		ret = -EINVAL;
2561 		goto out;
2562 	}
2563 
2564 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2565 	if (ret < 0)
2566 		goto out;
2567 
2568 	/*
2569 	 * TODO: once the nvs issue is solved, move this block
2570 	 * to start(), and make sure the driver is ON here.
2571 	 */
2572 	if (wl->state == WLCORE_STATE_OFF) {
2573 		/*
2574 		 * we still need this in order to configure the fw
2575 		 * while uploading the nvs
2576 		 */
2577 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2578 
2579 		ret = wl12xx_init_fw(wl);
2580 		if (ret < 0)
2581 			goto out;
2582 	}
2583 
2584 	/*
2585 	 * Call runtime PM only after possible wl12xx_init_fw() above
2586 	 * is done. Otherwise we do not have interrupts enabled.
2587 	 */
2588 	ret = pm_runtime_get_sync(wl->dev);
2589 	if (ret < 0) {
2590 		pm_runtime_put_noidle(wl->dev);
2591 		goto out_unlock;
2592 	}
2593 
2594 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2595 		wl12xx_force_active_psm(wl);
2596 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2597 		mutex_unlock(&wl->mutex);
2598 		wl1271_recovery_work(&wl->recovery_work);
2599 		return 0;
2600 	}
2601 
2602 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2603 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2604 					     role_type, &wlvif->role_id);
2605 		if (ret < 0)
2606 			goto out;
2607 
2608 		ret = wl1271_init_vif_specific(wl, vif);
2609 		if (ret < 0)
2610 			goto out;
2611 
2612 	} else {
2613 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2614 					     &wlvif->dev_role_id);
2615 		if (ret < 0)
2616 			goto out;
2617 
2618 		/* needed mainly for configuring rate policies */
2619 		ret = wl1271_sta_hw_init(wl, wlvif);
2620 		if (ret < 0)
2621 			goto out;
2622 	}
2623 
2624 	list_add(&wlvif->list, &wl->wlvif_list);
2625 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2626 
2627 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2628 		wl->ap_count++;
2629 	else
2630 		wl->sta_count++;
2631 out:
2632 	pm_runtime_mark_last_busy(wl->dev);
2633 	pm_runtime_put_autosuspend(wl->dev);
2634 out_unlock:
2635 	mutex_unlock(&wl->mutex);
2636 
2637 	return ret;
2638 }
2639 
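/*
 * Tear a vif down: abort any scan or ROC it owns, disable its fw
 * role(s) unless a recovery is in progress, free its rate policies,
 * templates and recorded AP keys, and cancel its per-vif works (the
 * mutex is dropped while doing so and re-taken before returning).
 */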
2640 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2641 					 struct ieee80211_vif *vif,
2642 					 bool reset_tx_queues)
2643 {
2644 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2645 	int i, ret;
2646 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2647 
2648 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2649 
2650 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2651 		return;
2652 
2653 	/* because of hardware recovery, we may get here twice */
2654 	if (wl->state == WLCORE_STATE_OFF)
2655 		return;
2656 
2657 	wl1271_info("down");
2658 
2659 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2660 	    wl->scan_wlvif == wlvif) {
2661 		struct cfg80211_scan_info info = {
2662 			.aborted = true,
2663 		};
2664 
2665 		/*
2666 		 * Rearm the tx watchdog just before idling scan. This
2667 		 * prevents just-finished scans from triggering the watchdog
2668 		 */
2669 		wl12xx_rearm_tx_watchdog_locked(wl);
2670 
2671 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2672 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2673 		wl->scan_wlvif = NULL;
2674 		wl->scan.req = NULL;
2675 		ieee80211_scan_completed(wl->hw, &info);
2676 	}
2677 
2678 	if (wl->sched_vif == wlvif)
2679 		wl->sched_vif = NULL;
2680 
2681 	if (wl->roc_vif == vif) {
2682 		wl->roc_vif = NULL;
2683 		ieee80211_remain_on_channel_expired(wl->hw);
2684 	}
2685 
2686 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2687 		/* disable active roles */
2688 		ret = pm_runtime_get_sync(wl->dev);
2689 		if (ret < 0) {
2690 			pm_runtime_put_noidle(wl->dev);
2691 			goto deinit;
2692 		}
2693 
2694 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2695 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2696 			if (wl12xx_dev_role_started(wlvif))
2697 				wl12xx_stop_dev(wl, wlvif);
2698 		}
2699 
2700 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2701 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2702 			if (ret < 0)
2703 				goto deinit;
2704 		} else {
2705 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2706 			if (ret < 0)
2707 				goto deinit;
2708 		}
2709 
2710 		pm_runtime_mark_last_busy(wl->dev);
2711 		pm_runtime_put_autosuspend(wl->dev);
2712 	}
2713 deinit:
2714 	wl12xx_tx_reset_wlvif(wl, wlvif);
2715 
2716 	/* clear all hlids (except system_hlid) */
2717 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2718 
2719 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2720 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2721 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2722 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2723 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2724 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2725 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2726 	} else {
2727 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2728 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2729 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2730 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2731 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2732 			wl12xx_free_rate_policy(wl,
2733 						&wlvif->ap.ucast_rate_idx[i]);
2734 		wl1271_free_ap_keys(wl, wlvif);
2735 	}
2736 
2737 	dev_kfree_skb(wlvif->probereq);
2738 	wlvif->probereq = NULL;
2739 	if (wl->last_wlvif == wlvif)
2740 		wl->last_wlvif = NULL;
2741 	list_del(&wlvif->list);
2742 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2743 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2744 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2745 
2746 	if (is_ap)
2747 		wl->ap_count--;
2748 	else
2749 		wl->sta_count--;
2750 
2751 	/*
2752 	 * Last AP removed, but stations remain: configure sleep auth according
2753 	 * to the STA setting. Don't do this on unintended recovery.
2754 	 */
2755 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2756 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2757 		goto unlock;
2758 
2759 	if (wl->ap_count == 0 && is_ap) {
2760 		/* mask ap events */
2761 		wl->event_mask &= ~wl->ap_event_mask;
2762 		wl1271_event_unmask(wl);
2763 	}
2764 
2765 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2766 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2767 		/* Configure for power according to debugfs */
2768 		if (sta_auth != WL1271_PSM_ILLEGAL)
2769 			wl1271_acx_sleep_auth(wl, sta_auth);
2770 		/* Configure for ELP power saving */
2771 		else
2772 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2773 	}
2774 
2775 unlock:
2776 	mutex_unlock(&wl->mutex);
2777 
2778 	del_timer_sync(&wlvif->rx_streaming_timer);
2779 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2780 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2781 	cancel_work_sync(&wlvif->rc_update_work);
2782 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2783 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2784 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2785 
2786 	mutex_lock(&wl->mutex);
2787 }
2788 
2789 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2790 				       struct ieee80211_vif *vif)
2791 {
2792 	struct wl1271 *wl = hw->priv;
2793 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2794 	struct wl12xx_vif *iter;
2795 	struct vif_counter_data vif_count;
2796 
2797 	wl12xx_get_vif_count(hw, vif, &vif_count);
2798 	mutex_lock(&wl->mutex);
2799 
2800 	if (wl->state == WLCORE_STATE_OFF ||
2801 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2802 		goto out;
2803 
2804 	/*
2805 	 * wl->vif can be null here if someone shuts down the interface
2806 	 * just when hardware recovery has been started.
2807 	 */
2808 	wl12xx_for_each_wlvif(wl, iter) {
2809 		if (iter != wlvif)
2810 			continue;
2811 
2812 		__wl1271_op_remove_interface(wl, vif, true);
2813 		break;
2814 	}
2815 	WARN_ON(iter != wlvif);
2816 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2817 		wl12xx_force_active_psm(wl);
2818 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2819 		wl12xx_queue_recovery_work(wl);
2820 	}
2821 out:
2822 	mutex_unlock(&wl->mutex);
2823 }
2824 
2825 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2826 				      struct ieee80211_vif *vif,
2827 				      enum nl80211_iftype new_type, bool p2p)
2828 {
2829 	struct wl1271 *wl = hw->priv;
2830 	int ret;
2831 
2832 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2833 	wl1271_op_remove_interface(hw, vif);
2834 
2835 	vif->type = new_type;
2836 	vif->p2p = p2p;
2837 	ret = wl1271_op_add_interface(hw, vif);
2838 
2839 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2840 	return ret;
2841 }
2842 
2843 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2844 {
2845 	int ret;
2846 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2847 
2848 	/*
2849 	 * One of the side effects of the JOIN command is that it clears
2850 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2851 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2852 	 * Currently the only valid scenario for JOIN during association
2853 	 * is on roaming, in which case we will also be given new keys.
2854 	 * Keep the below message for now, unless it starts bothering
2855 	 * users who really like to roam a lot :)
2856 	 */
2857 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2858 		wl1271_info("JOIN while associated.");
2859 
2860 	/* clear encryption type */
2861 	wlvif->encryption_type = KEY_NONE;
2862 
2863 	if (is_ibss)
2864 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2865 	else {
2866 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2867 			/*
2868 			 * TODO: this is an ugly workaround for wl12xx fw
2869 			 * bug - we are not able to tx/rx after the first
2870 			 * start_sta, so make dummy start+stop calls,
2871 			 * and then call start_sta again.
2872 			 * this should be fixed in the fw.
2873 			 */
2874 			wl12xx_cmd_role_start_sta(wl, wlvif);
2875 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2876 		}
2877 
2878 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2879 	}
2880 
2881 	return ret;
2882 }
2883 
2884 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2885 			    int offset)
2886 {
2887 	u8 ssid_len;
2888 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2889 					 skb->len - offset);
2890 
2891 	if (!ptr) {
2892 		wl1271_error("No SSID in IEs!");
2893 		return -ENOENT;
2894 	}
2895 
2896 	ssid_len = ptr[1];
2897 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2898 		wl1271_error("SSID is too long!");
2899 		return -EINVAL;
2900 	}
2901 
2902 	wlvif->ssid_len = ssid_len;
2903 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2904 	return 0;
2905 }
2906 
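/*
 * Recover the SSID from the probe request template mac80211 built for
 * the AP we are joining; only supported for STA interfaces.
 */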
2907 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2908 {
2909 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2910 	struct sk_buff *skb;
2911 	int ieoffset;
2912 
2913 	/* we currently only support setting the ssid from the ap probe req */
2914 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2915 		return -EINVAL;
2916 
2917 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2918 	if (!skb)
2919 		return -EINVAL;
2920 
2921 	ieoffset = offsetof(struct ieee80211_mgmt,
2922 			    u.probe_req.variable);
2923 	wl1271_ssid_set(wlvif, skb, ieoffset);
2924 	dev_kfree_skb(skb);
2925 
2926 	return 0;
2927 }
2928 
2929 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2930 			    struct ieee80211_bss_conf *bss_conf,
2931 			    u32 sta_rate_set)
2932 {
2933 	int ieoffset;
2934 	int ret;
2935 
2936 	wlvif->aid = bss_conf->aid;
2937 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2938 	wlvif->beacon_int = bss_conf->beacon_int;
2939 	wlvif->wmm_enabled = bss_conf->qos;
2940 
2941 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2942 
2943 	/*
2944 	 * with wl1271, we don't need to update the
2945 	 * beacon_int and dtim_period, because the firmware
2946 	 * updates it by itself when the first beacon is
2947 	 * received after a join.
2948 	 */
2949 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2950 	if (ret < 0)
2951 		return ret;
2952 
2953 	/*
2954 	 * Get a template for hardware connection maintenance
2955 	 */
2956 	dev_kfree_skb(wlvif->probereq);
2957 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2958 							wlvif,
2959 							NULL);
2960 	ieoffset = offsetof(struct ieee80211_mgmt,
2961 			    u.probe_req.variable);
2962 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2963 
2964 	/* enable the connection monitoring feature */
2965 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2966 	if (ret < 0)
2967 		return ret;
2968 
2969 	/*
2970 	 * The join command disables the keep-alive mode, shuts down its process,
2971 	 * and also clears the template config, so we need to reset it all after
2972 	 * the join. The acx_aid starts the keep-alive process, and the order
2973 	 * of the commands below is relevant.
2974 	 */
2975 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2976 	if (ret < 0)
2977 		return ret;
2978 
2979 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2980 	if (ret < 0)
2981 		return ret;
2982 
2983 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2984 	if (ret < 0)
2985 		return ret;
2986 
2987 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2988 					   wlvif->sta.klv_template_id,
2989 					   ACX_KEEP_ALIVE_TPL_VALID);
2990 	if (ret < 0)
2991 		return ret;
2992 
2993 	/*
2994 	 * The default fw psm configuration is AUTO, while mac80211 default
2995 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2996 	 */
2997 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2998 	if (ret < 0)
2999 		return ret;
3000 
3001 	if (sta_rate_set) {
3002 		wlvif->rate_set =
3003 			wl1271_tx_enabled_rates_get(wl,
3004 						    sta_rate_set,
3005 						    wlvif->band);
3006 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3007 		if (ret < 0)
3008 			return ret;
3009 	}
3010 
3011 	return ret;
3012 }
3013 
3014 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3015 {
3016 	int ret;
3017 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3018 
3019 	/* make sure we are associated (sta) */
3020 	if (sta &&
3021 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3022 		return false;
3023 
3024 	/* make sure we are joined (ibss) */
3025 	if (!sta &&
3026 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3027 		return false;
3028 
3029 	if (sta) {
3030 		/* use defaults when not associated */
3031 		wlvif->aid = 0;
3032 
3033 		/* free probe-request template */
3034 		dev_kfree_skb(wlvif->probereq);
3035 		wlvif->probereq = NULL;
3036 
3037 		/* disable connection monitor features */
3038 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3039 		if (ret < 0)
3040 			return ret;
3041 
3042 		/* Disable the keep-alive feature */
3043 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3044 		if (ret < 0)
3045 			return ret;
3046 
3047 		/* disable beacon filtering */
3048 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3049 		if (ret < 0)
3050 			return ret;
3051 	}
3052 
3053 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3054 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3055 
3056 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3057 		ieee80211_chswitch_done(vif, false);
3058 		cancel_delayed_work(&wlvif->channel_switch_work);
3059 	}
3060 
3061 	/* invalidate keep-alive template */
3062 	wl1271_acx_keep_alive_config(wl, wlvif,
3063 				     wlvif->sta.klv_template_id,
3064 				     ACX_KEEP_ALIVE_TPL_INVALID);
3065 
3066 	return 0;
3067 }
3068 
3069 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3070 {
3071 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3072 	wlvif->rate_set = wlvif->basic_rate_set;
3073 }
3074 
3075 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3076 				   bool idle)
3077 {
3078 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3079 
3080 	if (idle == cur_idle)
3081 		return;
3082 
3083 	if (idle) {
3084 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3085 	} else {
3086 		/* The current firmware only supports sched_scan in idle */
3087 		if (wl->sched_vif == wlvif)
3088 			wl->ops->sched_scan_stop(wl, wlvif);
3089 
3090 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3091 	}
3092 }
3093 
3094 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3095 			     struct ieee80211_conf *conf, u32 changed)
3096 {
3097 	int ret;
3098 
3099 	if (wlcore_is_p2p_mgmt(wlvif))
3100 		return 0;
3101 
3102 	if (conf->power_level != wlvif->power_level) {
3103 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3104 		if (ret < 0)
3105 			return ret;
3106 
3107 		wlvif->power_level = conf->power_level;
3108 	}
3109 
3110 	return 0;
3111 }
3112 
3113 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3114 {
3115 	struct wl1271 *wl = hw->priv;
3116 	struct wl12xx_vif *wlvif;
3117 	struct ieee80211_conf *conf = &hw->conf;
3118 	int ret = 0;
3119 
3120 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3121 		     " changed 0x%x",
3122 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3123 		     conf->power_level,
3124 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3125 			 changed);
3126 
3127 	mutex_lock(&wl->mutex);
3128 
3129 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3130 		wl->power_level = conf->power_level;
3131 
3132 	if (unlikely(wl->state != WLCORE_STATE_ON))
3133 		goto out;
3134 
3135 	ret = pm_runtime_get_sync(wl->dev);
3136 	if (ret < 0) {
3137 		pm_runtime_put_noidle(wl->dev);
3138 		goto out;
3139 	}
3140 
3141 	/* configure each interface */
3142 	wl12xx_for_each_wlvif(wl, wlvif) {
3143 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3144 		if (ret < 0)
3145 			goto out_sleep;
3146 	}
3147 
3148 out_sleep:
3149 	pm_runtime_mark_last_busy(wl->dev);
3150 	pm_runtime_put_autosuspend(wl->dev);
3151 
3152 out:
3153 	mutex_unlock(&wl->mutex);
3154 
3155 	return ret;
3156 }
3157 
3158 struct wl1271_filter_params {
3159 	bool enabled;
3160 	int mc_list_length;
3161 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3162 };
3163 
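/*
 * Runs in atomic context, hence the GFP_ATOMIC allocation. If the list
 * is longer than ACX_MC_ADDRESS_GROUP_MAX, hw multicast filtering is
 * disabled and all multicast frames are accepted instead.
 */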
3164 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3165 				       struct netdev_hw_addr_list *mc_list)
3166 {
3167 	struct wl1271_filter_params *fp;
3168 	struct netdev_hw_addr *ha;
3169 
3170 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3171 	if (!fp) {
3172 		wl1271_error("Out of memory setting filters.");
3173 		return 0;
3174 	}
3175 
3176 	/* update multicast filtering parameters */
3177 	fp->mc_list_length = 0;
3178 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3179 		fp->enabled = false;
3180 	} else {
3181 		fp->enabled = true;
3182 		netdev_hw_addr_list_for_each(ha, mc_list) {
3183 			memcpy(fp->mc_list[fp->mc_list_length],
3184 					ha->addr, ETH_ALEN);
3185 			fp->mc_list_length++;
3186 		}
3187 	}
3188 
3189 	return (u64)(unsigned long)fp;
3190 }
3191 
3192 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3193 				  FIF_FCSFAIL | \
3194 				  FIF_BCN_PRBRESP_PROMISC | \
3195 				  FIF_CONTROL | \
3196 				  FIF_OTHER_BSS)
3197 
3198 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3199 				       unsigned int changed,
3200 				       unsigned int *total, u64 multicast)
3201 {
3202 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3203 	struct wl1271 *wl = hw->priv;
3204 	struct wl12xx_vif *wlvif;
3205 
3206 	int ret;
3207 
3208 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3209 		     " total %x", changed, *total);
3210 
3211 	mutex_lock(&wl->mutex);
3212 
3213 	*total &= WL1271_SUPPORTED_FILTERS;
3214 	changed &= WL1271_SUPPORTED_FILTERS;
3215 
3216 	if (unlikely(wl->state != WLCORE_STATE_ON))
3217 		goto out;
3218 
3219 	ret = pm_runtime_get_sync(wl->dev);
3220 	if (ret < 0) {
3221 		pm_runtime_put_noidle(wl->dev);
3222 		goto out;
3223 	}
3224 
3225 	wl12xx_for_each_wlvif(wl, wlvif) {
3226 		if (wlcore_is_p2p_mgmt(wlvif))
3227 			continue;
3228 
3229 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3230 			if (*total & FIF_ALLMULTI)
3231 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3232 								   false,
3233 								   NULL, 0);
3234 			else if (fp)
3235 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3236 							fp->enabled,
3237 							fp->mc_list,
3238 							fp->mc_list_length);
3239 			if (ret < 0)
3240 				goto out_sleep;
3241 		}
3242 
3243 		/*
3244 		 * If the interface is in AP mode and was created with allmulticast,
3245 		 * disable the firmware filters so that all multicast packets
3246 		 * are passed. This is mandatory for mDNS-based discovery protocols.
3247 		 */
3248 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3249 			if (*total & FIF_ALLMULTI) {
3250 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3251 							false,
3252 							NULL, 0);
3253 				if (ret < 0)
3254 					goto out_sleep;
3255 			}
3256 		}
3257 	}
3258 
3259 	/*
3260 	 * the fw doesn't provide an api to configure the filters. instead,
3261 	 * the filters configuration is based on the active roles / ROC
3262 	 * state.
3263 	 */
3264 
3265 out_sleep:
3266 	pm_runtime_mark_last_busy(wl->dev);
3267 	pm_runtime_put_autosuspend(wl->dev);
3268 
3269 out:
3270 	mutex_unlock(&wl->mutex);
3271 	kfree(fp);
3272 }
3273 
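/*
 * Group/pairwise keys handed to us before the AP role is started
 * cannot be programmed yet; record them here and replay them from
 * wl1271_ap_init_hwenc() once the BSS has been started.
 */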
3274 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3275 				u8 id, u8 key_type, u8 key_size,
3276 				const u8 *key, u8 hlid, u32 tx_seq_32,
3277 				u16 tx_seq_16)
3278 {
3279 	struct wl1271_ap_key *ap_key;
3280 	int i;
3281 
3282 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3283 
3284 	if (key_size > MAX_KEY_SIZE)
3285 		return -EINVAL;
3286 
3287 	/*
3288 	 * Find next free entry in ap_keys. Also check we are not replacing
3289 	 * an existing key.
3290 	 */
3291 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3292 		if (wlvif->ap.recorded_keys[i] == NULL)
3293 			break;
3294 
3295 		if (wlvif->ap.recorded_keys[i]->id == id) {
3296 			wl1271_warning("trying to record key replacement");
3297 			return -EINVAL;
3298 		}
3299 	}
3300 
3301 	if (i == MAX_NUM_KEYS)
3302 		return -EBUSY;
3303 
3304 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3305 	if (!ap_key)
3306 		return -ENOMEM;
3307 
3308 	ap_key->id = id;
3309 	ap_key->key_type = key_type;
3310 	ap_key->key_size = key_size;
3311 	memcpy(ap_key->key, key, key_size);
3312 	ap_key->hlid = hlid;
3313 	ap_key->tx_seq_32 = tx_seq_32;
3314 	ap_key->tx_seq_16 = tx_seq_16;
3315 
3316 	wlvif->ap.recorded_keys[i] = ap_key;
3317 	return 0;
3318 }
3319 
3320 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3321 {
3322 	int i;
3323 
3324 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3325 		kfree(wlvif->ap.recorded_keys[i]);
3326 		wlvif->ap.recorded_keys[i] = NULL;
3327 	}
3328 }
3329 
3330 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3331 {
3332 	int i, ret = 0;
3333 	struct wl1271_ap_key *key;
3334 	bool wep_key_added = false;
3335 
3336 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3337 		u8 hlid;
3338 		if (wlvif->ap.recorded_keys[i] == NULL)
3339 			break;
3340 
3341 		key = wlvif->ap.recorded_keys[i];
3342 		hlid = key->hlid;
3343 		if (hlid == WL12XX_INVALID_LINK_ID)
3344 			hlid = wlvif->ap.bcast_hlid;
3345 
3346 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3347 					    key->id, key->key_type,
3348 					    key->key_size, key->key,
3349 					    hlid, key->tx_seq_32,
3350 					    key->tx_seq_16);
3351 		if (ret < 0)
3352 			goto out;
3353 
3354 		if (key->key_type == KEY_WEP)
3355 			wep_key_added = true;
3356 	}
3357 
3358 	if (wep_key_added) {
3359 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3360 						     wlvif->ap.bcast_hlid);
3361 		if (ret < 0)
3362 			goto out;
3363 	}
3364 
3365 out:
3366 	wl1271_free_ap_keys(wl, wlvif);
3367 	return ret;
3368 }
3369 
3370 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3371 		       u16 action, u8 id, u8 key_type,
3372 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3373 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3374 {
3375 	int ret;
3376 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3377 
3378 	if (is_ap) {
3379 		struct wl1271_station *wl_sta;
3380 		u8 hlid;
3381 
3382 		if (sta) {
3383 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3384 			hlid = wl_sta->hlid;
3385 		} else {
3386 			hlid = wlvif->ap.bcast_hlid;
3387 		}
3388 
3389 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3390 			/*
3391 			 * We do not support removing keys after AP shutdown.
3392 			 * Pretend we do to make mac80211 happy.
3393 			 */
3394 			if (action != KEY_ADD_OR_REPLACE)
3395 				return 0;
3396 
3397 			ret = wl1271_record_ap_key(wl, wlvif, id,
3398 					     key_type, key_size,
3399 					     key, hlid, tx_seq_32,
3400 					     tx_seq_16);
3401 		} else {
3402 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3403 					     id, key_type, key_size,
3404 					     key, hlid, tx_seq_32,
3405 					     tx_seq_16);
3406 		}
3407 
3408 		if (ret < 0)
3409 			return ret;
3410 	} else {
3411 		const u8 *addr;
3412 		static const u8 bcast_addr[ETH_ALEN] = {
3413 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3414 		};
3415 
3416 		addr = sta ? sta->addr : bcast_addr;
3417 
3418 		if (is_zero_ether_addr(addr)) {
3419 			/* We don't support TX-only encryption */
3420 			return -EOPNOTSUPP;
3421 		}
3422 
3423 		/* The wl1271 does not allow removing unicast keys - they
3424 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3425 		   request silently, as we don't want mac80211 to emit
3426 		   an error message. */
3427 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3428 			return 0;
3429 
3430 		/* don't remove key if hlid was already deleted */
3431 		if (action == KEY_REMOVE &&
3432 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3433 			return 0;
3434 
3435 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3436 					     id, key_type, key_size,
3437 					     key, addr, tx_seq_32,
3438 					     tx_seq_16);
3439 		if (ret < 0)
3440 			return ret;
3441 
3442 	}
3443 
3444 	return 0;
3445 }
3446 
3447 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3448 			     struct ieee80211_vif *vif,
3449 			     struct ieee80211_sta *sta,
3450 			     struct ieee80211_key_conf *key_conf)
3451 {
3452 	struct wl1271 *wl = hw->priv;
3453 	int ret;
3454 	bool might_change_spare =
3455 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3456 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3457 
3458 	if (might_change_spare) {
3459 		/*
3460 		 * stop the queues and flush to ensure the next packets are
3461 		 * in sync with FW spare block accounting
3462 		 */
3463 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3464 		wl1271_tx_flush(wl);
3465 	}
3466 
3467 	mutex_lock(&wl->mutex);
3468 
3469 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3470 		ret = -EAGAIN;
3471 		goto out_wake_queues;
3472 	}
3473 
3474 	ret = pm_runtime_get_sync(wl->dev);
3475 	if (ret < 0) {
3476 		pm_runtime_put_noidle(wl->dev);
3477 		goto out_wake_queues;
3478 	}
3479 
3480 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3481 
3482 	pm_runtime_mark_last_busy(wl->dev);
3483 	pm_runtime_put_autosuspend(wl->dev);
3484 
3485 out_wake_queues:
3486 	if (might_change_spare)
3487 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3488 
3489 	mutex_unlock(&wl->mutex);
3490 
3491 	return ret;
3492 }
3493 
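/*
 * Exported so the chip-specific set_key handlers can share it: map the
 * mac80211 cipher to a fw key type, derive the initial tx sequence
 * counter from the link's total_freed_pkts, and program or remove the
 * key. Changing the unicast cipher on a STA link also rebuilds the
 * ARP response template.
 */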
3494 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3495 		   struct ieee80211_vif *vif,
3496 		   struct ieee80211_sta *sta,
3497 		   struct ieee80211_key_conf *key_conf)
3498 {
3499 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3500 	int ret;
3501 	u32 tx_seq_32 = 0;
3502 	u16 tx_seq_16 = 0;
3503 	u8 key_type;
3504 	u8 hlid;
3505 
3506 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3507 
3508 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3509 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3510 		     key_conf->cipher, key_conf->keyidx,
3511 		     key_conf->keylen, key_conf->flags);
3512 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3513 
3514 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3515 		if (sta) {
3516 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3517 			hlid = wl_sta->hlid;
3518 		} else {
3519 			hlid = wlvif->ap.bcast_hlid;
3520 		}
3521 	else
3522 		hlid = wlvif->sta.hlid;
3523 
3524 	if (hlid != WL12XX_INVALID_LINK_ID) {
3525 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3526 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3527 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3528 	}
3529 
3530 	switch (key_conf->cipher) {
3531 	case WLAN_CIPHER_SUITE_WEP40:
3532 	case WLAN_CIPHER_SUITE_WEP104:
3533 		key_type = KEY_WEP;
3534 
3535 		key_conf->hw_key_idx = key_conf->keyidx;
3536 		break;
3537 	case WLAN_CIPHER_SUITE_TKIP:
3538 		key_type = KEY_TKIP;
3539 		key_conf->hw_key_idx = key_conf->keyidx;
3540 		break;
3541 	case WLAN_CIPHER_SUITE_CCMP:
3542 		key_type = KEY_AES;
3543 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3544 		break;
3545 	case WL1271_CIPHER_SUITE_GEM:
3546 		key_type = KEY_GEM;
3547 		break;
3548 	default:
3549 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3550 
3551 		return -EOPNOTSUPP;
3552 	}
3553 
3554 	switch (cmd) {
3555 	case SET_KEY:
3556 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3557 				 key_conf->keyidx, key_type,
3558 				 key_conf->keylen, key_conf->key,
3559 				 tx_seq_32, tx_seq_16, sta);
3560 		if (ret < 0) {
3561 			wl1271_error("Could not add or replace key");
3562 			return ret;
3563 		}
3564 
3565 		/*
3566 		 * reconfiguring arp response if the unicast (or common)
3567 		 * encryption key type was changed
3568 		 */
3569 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3570 		    (sta || key_type == KEY_WEP) &&
3571 		    wlvif->encryption_type != key_type) {
3572 			wlvif->encryption_type = key_type;
3573 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3574 			if (ret < 0) {
3575 				wl1271_warning("build arp rsp failed: %d", ret);
3576 				return ret;
3577 			}
3578 		}
3579 		break;
3580 
3581 	case DISABLE_KEY:
3582 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3583 				     key_conf->keyidx, key_type,
3584 				     key_conf->keylen, key_conf->key,
3585 				     0, 0, sta);
3586 		if (ret < 0) {
3587 			wl1271_error("Could not remove key");
3588 			return ret;
3589 		}
3590 		break;
3591 
3592 	default:
3593 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3594 		return -EOPNOTSUPP;
3595 	}
3596 
3597 	return ret;
3598 }
3599 EXPORT_SYMBOL_GPL(wlcore_set_key);
3600 
3601 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3602 					  struct ieee80211_vif *vif,
3603 					  int key_idx)
3604 {
3605 	struct wl1271 *wl = hw->priv;
3606 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3607 	int ret;
3608 
3609 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3610 		     key_idx);
3611 
3612 	/* we don't handle unsetting of default key */
3613 	if (key_idx == -1)
3614 		return;
3615 
3616 	mutex_lock(&wl->mutex);
3617 
3618 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3619 		ret = -EAGAIN;
3620 		goto out_unlock;
3621 	}
3622 
3623 	ret = pm_runtime_get_sync(wl->dev);
3624 	if (ret < 0) {
3625 		pm_runtime_put_noidle(wl->dev);
3626 		goto out_unlock;
3627 	}
3628 
3629 	wlvif->default_key = key_idx;
3630 
3631 	/* the default WEP key needs to be configured at least once */
3632 	if (wlvif->encryption_type == KEY_WEP) {
3633 		ret = wl12xx_cmd_set_default_wep_key(wl,
3634 				key_idx,
3635 				wlvif->sta.hlid);
3636 		if (ret < 0)
3637 			goto out_sleep;
3638 	}
3639 
3640 out_sleep:
3641 	pm_runtime_mark_last_busy(wl->dev);
3642 	pm_runtime_put_autosuspend(wl->dev);
3643 
3644 out_unlock:
3645 	mutex_unlock(&wl->mutex);
3646 }
3647 
3648 void wlcore_regdomain_config(struct wl1271 *wl)
3649 {
3650 	int ret;
3651 
3652 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3653 		return;
3654 
3655 	mutex_lock(&wl->mutex);
3656 
3657 	if (unlikely(wl->state != WLCORE_STATE_ON))
3658 		goto out;
3659 
3660 	ret = pm_runtime_get_sync(wl->dev);
3661 	if (ret < 0)
3662 		goto out;
3663 
3664 	ret = wlcore_cmd_regdomain_config_locked(wl);
3665 	if (ret < 0) {
3666 		wl12xx_queue_recovery_work(wl);
3667 		goto out;
3668 	}
3669 
3670 	pm_runtime_mark_last_busy(wl->dev);
3671 	pm_runtime_put_autosuspend(wl->dev);
3672 out:
3673 	mutex_unlock(&wl->mutex);
3674 }
3675 
3676 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3677 			     struct ieee80211_vif *vif,
3678 			     struct ieee80211_scan_request *hw_req)
3679 {
3680 	struct cfg80211_scan_request *req = &hw_req->req;
3681 	struct wl1271 *wl = hw->priv;
3682 	int ret;
3683 	u8 *ssid = NULL;
3684 	size_t len = 0;
3685 
3686 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3687 
3688 	if (req->n_ssids) {
3689 		ssid = req->ssids[0].ssid;
3690 		len = req->ssids[0].ssid_len;
3691 	}
3692 
3693 	mutex_lock(&wl->mutex);
3694 
3695 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3696 		/*
3697 		 * We cannot return -EBUSY here because cfg80211 will expect
3698 		 * a call to ieee80211_scan_completed if we do - in this case
3699 		 * there won't be any call.
3700 		 */
3701 		ret = -EAGAIN;
3702 		goto out;
3703 	}
3704 
3705 	ret = pm_runtime_get_sync(wl->dev);
3706 	if (ret < 0) {
3707 		pm_runtime_put_noidle(wl->dev);
3708 		goto out;
3709 	}
3710 
3711 	/* fail if there is any role in ROC */
3712 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3713 		/* don't allow scanning right now */
3714 		ret = -EBUSY;
3715 		goto out_sleep;
3716 	}
3717 
3718 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3719 out_sleep:
3720 	pm_runtime_mark_last_busy(wl->dev);
3721 	pm_runtime_put_autosuspend(wl->dev);
3722 out:
3723 	mutex_unlock(&wl->mutex);
3724 
3725 	return ret;
3726 }
3727 
3728 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3729 				     struct ieee80211_vif *vif)
3730 {
3731 	struct wl1271 *wl = hw->priv;
3732 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3733 	struct cfg80211_scan_info info = {
3734 		.aborted = true,
3735 	};
3736 	int ret;
3737 
3738 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3739 
3740 	mutex_lock(&wl->mutex);
3741 
3742 	if (unlikely(wl->state != WLCORE_STATE_ON))
3743 		goto out;
3744 
3745 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3746 		goto out;
3747 
3748 	ret = pm_runtime_get_sync(wl->dev);
3749 	if (ret < 0) {
3750 		pm_runtime_put_noidle(wl->dev);
3751 		goto out;
3752 	}
3753 
3754 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3755 		ret = wl->ops->scan_stop(wl, wlvif);
3756 		if (ret < 0)
3757 			goto out_sleep;
3758 	}
3759 
3760 	/*
3761 	 * Rearm the tx watchdog just before idling scan. This
3762 	 * prevents just-finished scans from triggering the watchdog
3763 	 */
3764 	wl12xx_rearm_tx_watchdog_locked(wl);
3765 
3766 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3767 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3768 	wl->scan_wlvif = NULL;
3769 	wl->scan.req = NULL;
3770 	ieee80211_scan_completed(wl->hw, &info);
3771 
3772 out_sleep:
3773 	pm_runtime_mark_last_busy(wl->dev);
3774 	pm_runtime_put_autosuspend(wl->dev);
3775 out:
3776 	mutex_unlock(&wl->mutex);
3777 
3778 	cancel_delayed_work_sync(&wl->scan_complete_work);
3779 }
3780 
3781 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3782 				      struct ieee80211_vif *vif,
3783 				      struct cfg80211_sched_scan_request *req,
3784 				      struct ieee80211_scan_ies *ies)
3785 {
3786 	struct wl1271 *wl = hw->priv;
3787 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3788 	int ret;
3789 
3790 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3791 
3792 	mutex_lock(&wl->mutex);
3793 
3794 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3795 		ret = -EAGAIN;
3796 		goto out;
3797 	}
3798 
3799 	ret = pm_runtime_get_sync(wl->dev);
3800 	if (ret < 0) {
3801 		pm_runtime_put_noidle(wl->dev);
3802 		goto out;
3803 	}
3804 
3805 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3806 	if (ret < 0)
3807 		goto out_sleep;
3808 
3809 	wl->sched_vif = wlvif;
3810 
3811 out_sleep:
3812 	pm_runtime_mark_last_busy(wl->dev);
3813 	pm_runtime_put_autosuspend(wl->dev);
3814 out:
3815 	mutex_unlock(&wl->mutex);
3816 	return ret;
3817 }
3818 
3819 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3820 				     struct ieee80211_vif *vif)
3821 {
3822 	struct wl1271 *wl = hw->priv;
3823 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3824 	int ret;
3825 
3826 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3827 
3828 	mutex_lock(&wl->mutex);
3829 
3830 	if (unlikely(wl->state != WLCORE_STATE_ON))
3831 		goto out;
3832 
3833 	ret = pm_runtime_get_sync(wl->dev);
3834 	if (ret < 0) {
3835 		pm_runtime_put_noidle(wl->dev);
3836 		goto out;
3837 	}
3838 
3839 	wl->ops->sched_scan_stop(wl, wlvif);
3840 
3841 	pm_runtime_mark_last_busy(wl->dev);
3842 	pm_runtime_put_autosuspend(wl->dev);
3843 out:
3844 	mutex_unlock(&wl->mutex);
3845 
3846 	return 0;
3847 }
3848 
3849 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3850 {
3851 	struct wl1271 *wl = hw->priv;
3852 	int ret = 0;
3853 
3854 	mutex_lock(&wl->mutex);
3855 
3856 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3857 		ret = -EAGAIN;
3858 		goto out;
3859 	}
3860 
3861 	ret = pm_runtime_get_sync(wl->dev);
3862 	if (ret < 0) {
3863 		pm_runtime_put_noidle(wl->dev);
3864 		goto out;
3865 	}
3866 
3867 	ret = wl1271_acx_frag_threshold(wl, value);
3868 	if (ret < 0)
3869 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3870 
3871 	pm_runtime_mark_last_busy(wl->dev);
3872 	pm_runtime_put_autosuspend(wl->dev);
3873 
3874 out:
3875 	mutex_unlock(&wl->mutex);
3876 
3877 	return ret;
3878 }
3879 
3880 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3881 {
3882 	struct wl1271 *wl = hw->priv;
3883 	struct wl12xx_vif *wlvif;
3884 	int ret = 0;
3885 
3886 	mutex_lock(&wl->mutex);
3887 
3888 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3889 		ret = -EAGAIN;
3890 		goto out;
3891 	}
3892 
3893 	ret = pm_runtime_get_sync(wl->dev);
3894 	if (ret < 0) {
3895 		pm_runtime_put_noidle(wl->dev);
3896 		goto out;
3897 	}
3898 
3899 	wl12xx_for_each_wlvif(wl, wlvif) {
3900 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3901 		if (ret < 0)
3902 			wl1271_warning("set rts threshold failed: %d", ret);
3903 	}
3904 	pm_runtime_mark_last_busy(wl->dev);
3905 	pm_runtime_put_autosuspend(wl->dev);
3906 
3907 out:
3908 	mutex_unlock(&wl->mutex);
3909 
3910 	return ret;
3911 }
3912 
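/*
 * Strip a single IE (identified by its EID) from a management frame:
 * find it starting at ieoffset, move the rest of the frame over it and
 * trim the skb accordingly.
 */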
3913 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3914 {
3915 	int len;
3916 	const u8 *next, *end = skb->data + skb->len;
3917 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3918 					skb->len - ieoffset);
3919 	if (!ie)
3920 		return;
3921 	len = ie[1] + 2;
3922 	next = ie + len;
3923 	memmove(ie, next, end - next);
3924 	skb_trim(skb, skb->len - len);
3925 }
3926 
3927 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3928 					    unsigned int oui, u8 oui_type,
3929 					    int ieoffset)
3930 {
3931 	int len;
3932 	const u8 *next, *end = skb->data + skb->len;
3933 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3934 					       skb->data + ieoffset,
3935 					       skb->len - ieoffset);
3936 	if (!ie)
3937 		return;
3938 	len = ie[1] + 2;
3939 	next = ie + len;
3940 	memmove(ie, next, end - next);
3941 	skb_trim(skb, skb->len - len);
3942 }
3943 
3944 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3945 					 struct ieee80211_vif *vif)
3946 {
3947 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3948 	struct sk_buff *skb;
3949 	int ret;
3950 
3951 	skb = ieee80211_proberesp_get(wl->hw, vif);
3952 	if (!skb)
3953 		return -EOPNOTSUPP;
3954 
3955 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3956 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3957 				      skb->data,
3958 				      skb->len, 0,
3959 				      rates);
3960 	dev_kfree_skb(skb);
3961 
3962 	if (ret < 0)
3963 		goto out;
3964 
3965 	wl1271_debug(DEBUG_AP, "probe response updated");
3966 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3967 
3968 out:
3969 	return ret;
3970 }
3971 
3972 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3973 					     struct ieee80211_vif *vif,
3974 					     u8 *probe_rsp_data,
3975 					     size_t probe_rsp_len,
3976 					     u32 rates)
3977 {
3978 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3979 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3980 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3981 	int ssid_ie_offset, ie_offset, templ_len;
3982 	const u8 *ptr;
3983 
3984 	/* no need to change probe response if the SSID is set correctly */
3985 	if (wlvif->ssid_len > 0)
3986 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3987 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3988 					       probe_rsp_data,
3989 					       probe_rsp_len, 0,
3990 					       rates);
3991 
3992 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3993 		wl1271_error("probe_rsp template too big");
3994 		return -EINVAL;
3995 	}
3996 
3997 	/* start searching from IE offset */
3998 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3999 
4000 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4001 			       probe_rsp_len - ie_offset);
4002 	if (!ptr) {
4003 		wl1271_error("No SSID in beacon!");
4004 		return -EINVAL;
4005 	}
4006 
4007 	ssid_ie_offset = ptr - probe_rsp_data;
4008 	ptr += (ptr[1] + 2);
4009 
4010 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4011 
4012 	/* insert SSID from bss_conf */
4013 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4014 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4015 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4016 	       bss_conf->ssid, bss_conf->ssid_len);
4017 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4018 
4019 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4020 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4021 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4022 
4023 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4024 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4025 				       probe_rsp_templ,
4026 				       templ_len, 0,
4027 				       rates);
4028 }
4029 
4030 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4031 				       struct ieee80211_vif *vif,
4032 				       struct ieee80211_bss_conf *bss_conf,
4033 				       u32 changed)
4034 {
4035 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4036 	int ret = 0;
4037 
4038 	if (changed & BSS_CHANGED_ERP_SLOT) {
4039 		if (bss_conf->use_short_slot)
4040 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4041 		else
4042 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4043 		if (ret < 0) {
4044 			wl1271_warning("Set slot time failed %d", ret);
4045 			goto out;
4046 		}
4047 	}
4048 
4049 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4050 		if (bss_conf->use_short_preamble)
4051 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4052 		else
4053 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4054 	}
4055 
4056 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4057 		if (bss_conf->use_cts_prot)
4058 			ret = wl1271_acx_cts_protect(wl, wlvif,
4059 						     CTSPROTECT_ENABLE);
4060 		else
4061 			ret = wl1271_acx_cts_protect(wl, wlvif,
4062 						     CTSPROTECT_DISABLE);
4063 		if (ret < 0) {
4064 			wl1271_warning("Set ctsprotect failed %d", ret);
4065 			goto out;
4066 		}
4067 	}
4068 
4069 out:
4070 	return ret;
4071 }
4072 
4073 static int wlcore_set_beacon_template(struct wl1271 *wl,
4074 				      struct ieee80211_vif *vif,
4075 				      bool is_ap)
4076 {
4077 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4078 	struct ieee80211_hdr *hdr;
4079 	u32 min_rate;
4080 	int ret;
4081 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4082 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4083 	u16 tmpl_id;
4084 
4085 	if (!beacon) {
4086 		ret = -EINVAL;
4087 		goto out;
4088 	}
4089 
4090 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4091 
4092 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4093 	if (ret < 0) {
4094 		dev_kfree_skb(beacon);
4095 		goto out;
4096 	}
4097 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4098 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4099 		CMD_TEMPL_BEACON;
4100 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4101 				      beacon->data,
4102 				      beacon->len, 0,
4103 				      min_rate);
4104 	if (ret < 0) {
4105 		dev_kfree_skb(beacon);
4106 		goto out;
4107 	}
4108 
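	/*
	 * The vif is considered WMM-enabled when the beacon carries the
	 * Microsoft WMM vendor IE.
	 */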
4109 	wlvif->wmm_enabled =
4110 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4111 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4112 					beacon->data + ieoffset,
4113 					beacon->len - ieoffset);
4114 
4115 	/*
4116 	 * In case userspace has already set a probe-resp template
4117 	 * explicitly, don't derive one from the beacon data.
4118 	 */
4119 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4120 		goto end_bcn;
4121 
4122 	/* remove TIM ie from probe response */
4123 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4124 
4125 	/*
4126 	 * Remove the P2P IE from the probe response.
4127 	 * The FW responds to probe requests that don't include
4128 	 * the P2P IE. Probe requests with the P2P IE will be passed up
4129 	 * and answered by the supplicant (the spec
4130 	 * forbids including the P2P IE when responding to probe
4131 	 * requests that didn't include it).
4132 	 */
4133 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4134 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4135 
4136 	hdr = (struct ieee80211_hdr *) beacon->data;
4137 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4138 					 IEEE80211_STYPE_PROBE_RESP);
4139 	if (is_ap)
4140 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4141 							   beacon->data,
4142 							   beacon->len,
4143 							   min_rate);
4144 	else
4145 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4146 					      CMD_TEMPL_PROBE_RESPONSE,
4147 					      beacon->data,
4148 					      beacon->len, 0,
4149 					      min_rate);
4150 end_bcn:
4151 	dev_kfree_skb(beacon);
4152 	if (ret < 0)
4153 		goto out;
4154 
4155 out:
4156 	return ret;
4157 }
4158 
4159 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4160 					  struct ieee80211_vif *vif,
4161 					  struct ieee80211_bss_conf *bss_conf,
4162 					  u32 changed)
4163 {
4164 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4165 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4166 	int ret = 0;
4167 
4168 	if (changed & BSS_CHANGED_BEACON_INT) {
4169 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4170 			bss_conf->beacon_int);
4171 
4172 		wlvif->beacon_int = bss_conf->beacon_int;
4173 	}
4174 
4175 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4176 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4177 
4178 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4179 	}
4180 
4181 	if (changed & BSS_CHANGED_BEACON) {
4182 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4183 		if (ret < 0)
4184 			goto out;
4185 
4186 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4187 				       &wlvif->flags)) {
4188 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4189 			if (ret < 0)
4190 				goto out;
4191 		}
4192 	}
4193 out:
4194 	if (ret != 0)
4195 		wl1271_error("beacon info change failed: %d", ret);
4196 	return ret;
4197 }
4198 
4199 /* AP mode changes */
4200 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4201 				       struct ieee80211_vif *vif,
4202 				       struct ieee80211_bss_conf *bss_conf,
4203 				       u32 changed)
4204 {
4205 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4206 	int ret = 0;
4207 
4208 	if (changed & BSS_CHANGED_BASIC_RATES) {
4209 		u32 rates = bss_conf->basic_rates;
4210 
4211 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4212 								 wlvif->band);
4213 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4214 							wlvif->basic_rate_set);
4215 
4216 		ret = wl1271_init_ap_rates(wl, wlvif);
4217 		if (ret < 0) {
4218 			wl1271_error("AP rate policy change failed %d", ret);
4219 			goto out;
4220 		}
4221 
4222 		ret = wl1271_ap_init_templates(wl, vif);
4223 		if (ret < 0)
4224 			goto out;
4225 
4226 		/* No need to set probe resp template for mesh */
4227 		if (!ieee80211_vif_is_mesh(vif)) {
4228 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4229 							    wlvif->basic_rate,
4230 							    vif);
4231 			if (ret < 0)
4232 				goto out;
4233 		}
4234 
4235 		ret = wlcore_set_beacon_template(wl, vif, true);
4236 		if (ret < 0)
4237 			goto out;
4238 	}
4239 
4240 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4241 	if (ret < 0)
4242 		goto out;
4243 
4244 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4245 		if (bss_conf->enable_beacon) {
4246 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4247 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4248 				if (ret < 0)
4249 					goto out;
4250 
4251 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4252 				if (ret < 0)
4253 					goto out;
4254 
4255 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4256 				wl1271_debug(DEBUG_AP, "started AP");
4257 			}
4258 		} else {
4259 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4260 				/*
4261 				 * The AP might be in ROC in case we have
4262 				 * just sent an auth reply. Handle it.
4263 				 */
4264 				if (test_bit(wlvif->role_id, wl->roc_map))
4265 					wl12xx_croc(wl, wlvif->role_id);
4266 
4267 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4268 				if (ret < 0)
4269 					goto out;
4270 
4271 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4272 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4273 					  &wlvif->flags);
4274 				wl1271_debug(DEBUG_AP, "stopped AP");
4275 			}
4276 		}
4277 	}
4278 
4279 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4280 	if (ret < 0)
4281 		goto out;
4282 
4283 	/* Handle HT information change */
4284 	if ((changed & BSS_CHANGED_HT) &&
4285 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4286 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4287 					bss_conf->ht_operation_mode);
4288 		if (ret < 0) {
4289 			wl1271_warning("Set ht information failed %d", ret);
4290 			goto out;
4291 		}
4292 	}
4293 
4294 out:
4295 	return;
4296 }
4297 
4298 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4299 			    struct ieee80211_bss_conf *bss_conf,
4300 			    u32 sta_rate_set)
4301 {
4302 	u32 rates;
4303 	int ret;
4304 
4305 	wl1271_debug(DEBUG_MAC80211,
4306 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4307 	     bss_conf->bssid, bss_conf->aid,
4308 	     bss_conf->beacon_int,
4309 	     bss_conf->basic_rates, sta_rate_set);
4310 
4311 	wlvif->beacon_int = bss_conf->beacon_int;
4312 	rates = bss_conf->basic_rates;
4313 	wlvif->basic_rate_set =
4314 		wl1271_tx_enabled_rates_get(wl, rates,
4315 					    wlvif->band);
4316 	wlvif->basic_rate =
4317 		wl1271_tx_min_rate_get(wl,
4318 				       wlvif->basic_rate_set);
4319 
4320 	if (sta_rate_set)
4321 		wlvif->rate_set =
4322 			wl1271_tx_enabled_rates_get(wl,
4323 						sta_rate_set,
4324 						wlvif->band);
4325 
4326 	/* we only support sched_scan while not connected */
4327 	if (wl->sched_vif == wlvif)
4328 		wl->ops->sched_scan_stop(wl, wlvif);
4329 
4330 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4331 	if (ret < 0)
4332 		return ret;
4333 
4334 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4335 	if (ret < 0)
4336 		return ret;
4337 
4338 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4339 	if (ret < 0)
4340 		return ret;
4341 
4342 	wlcore_set_ssid(wl, wlvif);
4343 
4344 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4345 
4346 	return 0;
4347 }
4348 
4349 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4350 {
4351 	int ret;
4352 
4353 	/* revert back to minimum rates for the current band */
4354 	wl1271_set_band_rate(wl, wlvif);
4355 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4356 
4357 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4358 	if (ret < 0)
4359 		return ret;
4360 
4361 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4362 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4363 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4364 		if (ret < 0)
4365 			return ret;
4366 	}
4367 
4368 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4369 	return 0;
4370 }
4371 /* STA/IBSS mode changes */
4372 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4373 					struct ieee80211_vif *vif,
4374 					struct ieee80211_bss_conf *bss_conf,
4375 					u32 changed)
4376 {
4377 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4378 	bool do_join = false;
4379 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4380 	bool ibss_joined = false;
4381 	u32 sta_rate_set = 0;
4382 	int ret;
4383 	struct ieee80211_sta *sta;
4384 	bool sta_exists = false;
4385 	struct ieee80211_sta_ht_cap sta_ht_cap;
4386 
4387 	if (is_ibss) {
4388 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4389 						     changed);
4390 		if (ret < 0)
4391 			goto out;
4392 	}
4393 
4394 	if (changed & BSS_CHANGED_IBSS) {
4395 		if (bss_conf->ibss_joined) {
4396 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4397 			ibss_joined = true;
4398 		} else {
4399 			wlcore_unset_assoc(wl, wlvif);
4400 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4401 		}
4402 	}
4403 
4404 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4405 		do_join = true;
4406 
4407 	/* Need to update the SSID (for filtering etc) */
4408 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4409 		do_join = true;
4410 
4411 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4412 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4413 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4414 
4415 		do_join = true;
4416 	}
4417 
4418 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4419 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4420 
4421 	if (changed & BSS_CHANGED_CQM) {
4422 		bool enable = false;
4423 		if (bss_conf->cqm_rssi_thold)
4424 			enable = true;
4425 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4426 						  bss_conf->cqm_rssi_thold,
4427 						  bss_conf->cqm_rssi_hyst);
4428 		if (ret < 0)
4429 			goto out;
4430 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4431 	}
4432 
4433 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4434 		       BSS_CHANGED_ASSOC)) {
4435 		rcu_read_lock();
4436 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4437 		if (sta) {
4438 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4439 
4440 			/* save the supp_rates of the ap */
4441 			sta_rate_set = sta->supp_rates[wlvif->band];
4442 			if (sta->ht_cap.ht_supported)
4443 				sta_rate_set |=
4444 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4445 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4446 			sta_ht_cap = sta->ht_cap;
4447 			sta_exists = true;
4448 		}
4449 
4450 		rcu_read_unlock();
4451 	}
4452 
4453 	if (changed & BSS_CHANGED_BSSID) {
4454 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4455 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4456 					       sta_rate_set);
4457 			if (ret < 0)
4458 				goto out;
4459 
4460 			/* Need to update the BSSID (for filtering etc) */
4461 			do_join = true;
4462 		} else {
4463 			ret = wlcore_clear_bssid(wl, wlvif);
4464 			if (ret < 0)
4465 				goto out;
4466 		}
4467 	}
4468 
4469 	if (changed & BSS_CHANGED_IBSS) {
4470 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4471 			     bss_conf->ibss_joined);
4472 
4473 		if (bss_conf->ibss_joined) {
4474 			u32 rates = bss_conf->basic_rates;
4475 			wlvif->basic_rate_set =
4476 				wl1271_tx_enabled_rates_get(wl, rates,
4477 							    wlvif->band);
4478 			wlvif->basic_rate =
4479 				wl1271_tx_min_rate_get(wl,
4480 						       wlvif->basic_rate_set);
4481 
4482 			/* by default, use 11b + OFDM rates */
4483 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4484 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4485 			if (ret < 0)
4486 				goto out;
4487 		}
4488 	}
4489 
4490 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4491 		/* enable beacon filtering */
4492 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4493 		if (ret < 0)
4494 			goto out;
4495 	}
4496 
4497 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4498 	if (ret < 0)
4499 		goto out;
4500 
4501 	if (do_join) {
4502 		ret = wlcore_join(wl, wlvif);
4503 		if (ret < 0) {
4504 			wl1271_warning("cmd join failed %d", ret);
4505 			goto out;
4506 		}
4507 	}
4508 
4509 	if (changed & BSS_CHANGED_ASSOC) {
4510 		if (bss_conf->assoc) {
4511 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4512 					       sta_rate_set);
4513 			if (ret < 0)
4514 				goto out;
4515 
4516 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4517 				wl12xx_set_authorized(wl, wlvif);
4518 		} else {
4519 			wlcore_unset_assoc(wl, wlvif);
4520 		}
4521 	}
4522 
4523 	if (changed & BSS_CHANGED_PS) {
4524 		if ((bss_conf->ps) &&
4525 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4526 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4527 			int ps_mode;
4528 			char *ps_mode_str;
4529 
4530 			if (wl->conf.conn.forced_ps) {
4531 				ps_mode = STATION_POWER_SAVE_MODE;
4532 				ps_mode_str = "forced";
4533 			} else {
4534 				ps_mode = STATION_AUTO_PS_MODE;
4535 				ps_mode_str = "auto";
4536 			}
4537 
4538 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4539 
4540 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4541 			if (ret < 0)
4542 				wl1271_warning("enter %s ps failed %d",
4543 					       ps_mode_str, ret);
4544 		} else if (!bss_conf->ps &&
4545 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4546 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4547 
4548 			ret = wl1271_ps_set_mode(wl, wlvif,
4549 						 STATION_ACTIVE_MODE);
4550 			if (ret < 0)
4551 				wl1271_warning("exit auto ps failed %d", ret);
4552 		}
4553 	}
4554 
4555 	/* Handle new association with HT. Do this after join. */
4556 	if (sta_exists) {
4557 		bool enabled =
4558 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4559 
4560 		ret = wlcore_hw_set_peer_cap(wl,
4561 					     &sta_ht_cap,
4562 					     enabled,
4563 					     wlvif->rate_set,
4564 					     wlvif->sta.hlid);
4565 		if (ret < 0) {
4566 			wl1271_warning("Set ht cap failed %d", ret);
4567 			goto out;
4568 
4569 		}
4570 
4571 		if (enabled) {
4572 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4573 						bss_conf->ht_operation_mode);
4574 			if (ret < 0) {
4575 				wl1271_warning("Set ht information failed %d",
4576 					       ret);
4577 				goto out;
4578 			}
4579 		}
4580 	}
4581 
4582 	/* Handle arp filtering. Done after join. */
4583 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4584 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4585 		__be32 addr = bss_conf->arp_addr_list[0];
4586 		wlvif->sta.qos = bss_conf->qos;
4587 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4588 
4589 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4590 			wlvif->ip_addr = addr;
4591 			/*
4592 			 * The template should have been configured only upon
4593 			 * association. However, it seems that the correct IP
4594 			 * isn't being set (when sending), so we have to
4595 			 * reconfigure the template upon every IP change.
4596 			 */
4597 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4598 			if (ret < 0) {
4599 				wl1271_warning("build arp rsp failed: %d", ret);
4600 				goto out;
4601 			}
4602 
4603 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4604 				(ACX_ARP_FILTER_ARP_FILTERING |
4605 				 ACX_ARP_FILTER_AUTO_ARP),
4606 				addr);
4607 		} else {
4608 			wlvif->ip_addr = 0;
4609 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4610 		}
4611 
4612 		if (ret < 0)
4613 			goto out;
4614 	}
4615 
4616 out:
4617 	return;
4618 }
4619 
4620 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4621 				       struct ieee80211_vif *vif,
4622 				       struct ieee80211_bss_conf *bss_conf,
4623 				       u32 changed)
4624 {
4625 	struct wl1271 *wl = hw->priv;
4626 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4627 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4628 	int ret;
4629 
4630 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4631 		     wlvif->role_id, (int)changed);
4632 
4633 	/*
4634 	 * make sure to cancel pending disconnections if our association
4635 	 * state changed
4636 	 */
4637 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4638 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4639 
4640 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4641 	    !bss_conf->enable_beacon)
4642 		wl1271_tx_flush(wl);
4643 
4644 	mutex_lock(&wl->mutex);
4645 
4646 	if (unlikely(wl->state != WLCORE_STATE_ON))
4647 		goto out;
4648 
4649 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4650 		goto out;
4651 
4652 	ret = pm_runtime_get_sync(wl->dev);
4653 	if (ret < 0) {
4654 		pm_runtime_put_noidle(wl->dev);
4655 		goto out;
4656 	}
4657 
4658 	if ((changed & BSS_CHANGED_TXPOWER) &&
4659 	    bss_conf->txpower != wlvif->power_level) {
4660 
4661 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4662 		if (ret < 0)
4663 			goto out;
4664 
4665 		wlvif->power_level = bss_conf->txpower;
4666 	}
4667 
4668 	if (is_ap)
4669 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4670 	else
4671 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4672 
4673 	pm_runtime_mark_last_busy(wl->dev);
4674 	pm_runtime_put_autosuspend(wl->dev);
4675 
4676 out:
4677 	mutex_unlock(&wl->mutex);
4678 }
4679 
4680 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4681 				 struct ieee80211_chanctx_conf *ctx)
4682 {
4683 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4684 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4685 		     cfg80211_get_chandef_type(&ctx->def));
4686 	return 0;
4687 }
4688 
4689 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4690 				     struct ieee80211_chanctx_conf *ctx)
4691 {
4692 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4693 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4694 		     cfg80211_get_chandef_type(&ctx->def));
4695 }
4696 
4697 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4698 				     struct ieee80211_chanctx_conf *ctx,
4699 				     u32 changed)
4700 {
4701 	struct wl1271 *wl = hw->priv;
4702 	struct wl12xx_vif *wlvif;
4703 	int ret;
4704 	int channel = ieee80211_frequency_to_channel(
4705 		ctx->def.chan->center_freq);
4706 
4707 	wl1271_debug(DEBUG_MAC80211,
4708 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4709 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4710 
4711 	mutex_lock(&wl->mutex);
4712 
4713 	ret = pm_runtime_get_sync(wl->dev);
4714 	if (ret < 0) {
4715 		pm_runtime_put_noidle(wl->dev);
4716 		goto out;
4717 	}
4718 
4719 	wl12xx_for_each_wlvif(wl, wlvif) {
4720 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4721 
4722 		rcu_read_lock();
4723 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4724 			rcu_read_unlock();
4725 			continue;
4726 		}
4727 		rcu_read_unlock();
4728 
4729 		/* start radar if needed */
4730 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4731 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4732 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4733 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4734 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4735 			wlcore_hw_set_cac(wl, wlvif, true);
4736 			wlvif->radar_enabled = true;
4737 		}
4738 	}
4739 
4740 	pm_runtime_mark_last_busy(wl->dev);
4741 	pm_runtime_put_autosuspend(wl->dev);
4742 out:
4743 	mutex_unlock(&wl->mutex);
4744 }
4745 
4746 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4747 					struct ieee80211_vif *vif,
4748 					struct ieee80211_chanctx_conf *ctx)
4749 {
4750 	struct wl1271 *wl = hw->priv;
4751 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4752 	int channel = ieee80211_frequency_to_channel(
4753 		ctx->def.chan->center_freq);
4754 	int ret = -EINVAL;
4755 
4756 	wl1271_debug(DEBUG_MAC80211,
4757 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4758 		     wlvif->role_id, channel,
4759 		     cfg80211_get_chandef_type(&ctx->def),
4760 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4761 
4762 	mutex_lock(&wl->mutex);
4763 
4764 	if (unlikely(wl->state != WLCORE_STATE_ON))
4765 		goto out;
4766 
4767 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4768 		goto out;
4769 
4770 	ret = pm_runtime_get_sync(wl->dev);
4771 	if (ret < 0) {
4772 		pm_runtime_put_noidle(wl->dev);
4773 		goto out;
4774 	}
4775 
4776 	wlvif->band = ctx->def.chan->band;
4777 	wlvif->channel = channel;
4778 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4779 
4780 	/* update default rates according to the band */
4781 	wl1271_set_band_rate(wl, wlvif);
4782 
4783 	if (ctx->radar_enabled &&
4784 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4785 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4786 		wlcore_hw_set_cac(wl, wlvif, true);
4787 		wlvif->radar_enabled = true;
4788 	}
4789 
4790 	pm_runtime_mark_last_busy(wl->dev);
4791 	pm_runtime_put_autosuspend(wl->dev);
4792 out:
4793 	mutex_unlock(&wl->mutex);
4794 
4795 	return 0;
4796 }
4797 
4798 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4799 					   struct ieee80211_vif *vif,
4800 					   struct ieee80211_chanctx_conf *ctx)
4801 {
4802 	struct wl1271 *wl = hw->priv;
4803 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4804 	int ret;
4805 
4806 	wl1271_debug(DEBUG_MAC80211,
4807 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4808 		     wlvif->role_id,
4809 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4810 		     cfg80211_get_chandef_type(&ctx->def));
4811 
4812 	wl1271_tx_flush(wl);
4813 
4814 	mutex_lock(&wl->mutex);
4815 
4816 	if (unlikely(wl->state != WLCORE_STATE_ON))
4817 		goto out;
4818 
4819 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4820 		goto out;
4821 
4822 	ret = pm_runtime_get_sync(wl->dev);
4823 	if (ret < 0) {
4824 		pm_runtime_put_noidle(wl->dev);
4825 		goto out;
4826 	}
4827 
4828 	if (wlvif->radar_enabled) {
4829 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4830 		wlcore_hw_set_cac(wl, wlvif, false);
4831 		wlvif->radar_enabled = false;
4832 	}
4833 
4834 	pm_runtime_mark_last_busy(wl->dev);
4835 	pm_runtime_put_autosuspend(wl->dev);
4836 out:
4837 	mutex_unlock(&wl->mutex);
4838 }
4839 
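/*
 * Move a single (AP) role to the channel described by new_ctx: stop any
 * running CAC on the old channel, adopt the new band/channel/type and
 * restart radar detection if the new context requires it.
 */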
4840 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4841 				    struct wl12xx_vif *wlvif,
4842 				    struct ieee80211_chanctx_conf *new_ctx)
4843 {
4844 	int channel = ieee80211_frequency_to_channel(
4845 		new_ctx->def.chan->center_freq);
4846 
4847 	wl1271_debug(DEBUG_MAC80211,
4848 		     "switch vif (role %d) %d -> %d chan_type: %d",
4849 		     wlvif->role_id, wlvif->channel, channel,
4850 		     cfg80211_get_chandef_type(&new_ctx->def));
4851 
4852 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4853 		return 0;
4854 
4855 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4856 
4857 	if (wlvif->radar_enabled) {
4858 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4859 		wlcore_hw_set_cac(wl, wlvif, false);
4860 		wlvif->radar_enabled = false;
4861 	}
4862 
4863 	wlvif->band = new_ctx->def.chan->band;
4864 	wlvif->channel = channel;
4865 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4866 
4867 	/* start radar if needed */
4868 	if (new_ctx->radar_enabled) {
4869 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4870 		wlcore_hw_set_cac(wl, wlvif, true);
4871 		wlvif->radar_enabled = true;
4872 	}
4873 
4874 	return 0;
4875 }
4876 
4877 static int
4878 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4879 			     struct ieee80211_vif_chanctx_switch *vifs,
4880 			     int n_vifs,
4881 			     enum ieee80211_chanctx_switch_mode mode)
4882 {
4883 	struct wl1271 *wl = hw->priv;
4884 	int i, ret;
4885 
4886 	wl1271_debug(DEBUG_MAC80211,
4887 		     "mac80211 switch chanctx n_vifs %d mode %d",
4888 		     n_vifs, mode);
4889 
4890 	mutex_lock(&wl->mutex);
4891 
4892 	ret = pm_runtime_get_sync(wl->dev);
4893 	if (ret < 0) {
4894 		pm_runtime_put_noidle(wl->dev);
4895 		goto out;
4896 	}
4897 
4898 	for (i = 0; i < n_vifs; i++) {
4899 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4900 
4901 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4902 		if (ret)
4903 			goto out_sleep;
4904 	}
4905 out_sleep:
4906 	pm_runtime_mark_last_busy(wl->dev);
4907 	pm_runtime_put_autosuspend(wl->dev);
4908 out:
4909 	mutex_unlock(&wl->mutex);
4910 
4911 	return 0;
4912 }
4913 
4914 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4915 			     struct ieee80211_vif *vif, u16 queue,
4916 			     const struct ieee80211_tx_queue_params *params)
4917 {
4918 	struct wl1271 *wl = hw->priv;
4919 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4920 	u8 ps_scheme;
4921 	int ret = 0;
4922 
4923 	if (wlcore_is_p2p_mgmt(wlvif))
4924 		return 0;
4925 
4926 	mutex_lock(&wl->mutex);
4927 
4928 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4929 
4930 	if (params->uapsd)
4931 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4932 	else
4933 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4934 
4935 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4936 		goto out;
4937 
4938 	ret = pm_runtime_get_sync(wl->dev);
4939 	if (ret < 0) {
4940 		pm_runtime_put_noidle(wl->dev);
4941 		goto out;
4942 	}
4943 
4944 	/*
4945 	 * mac80211 passes the txop in units of 32us, but the firmware
4946 	 * expects microseconds, hence the << 5 below.
4947 	 */
4948 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4949 				params->cw_min, params->cw_max,
4950 				params->aifs, params->txop << 5);
4951 	if (ret < 0)
4952 		goto out_sleep;
4953 
4954 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4955 				 CONF_CHANNEL_TYPE_EDCF,
4956 				 wl1271_tx_get_queue(queue),
4957 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4958 				 0, 0);
4959 
4960 out_sleep:
4961 	pm_runtime_mark_last_busy(wl->dev);
4962 	pm_runtime_put_autosuspend(wl->dev);
4963 
4964 out:
4965 	mutex_unlock(&wl->mutex);
4966 
4967 	return ret;
4968 }
4969 
4970 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4971 			     struct ieee80211_vif *vif)
4972 {
4973 
4974 	struct wl1271 *wl = hw->priv;
4975 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4976 	u64 mactime = ULLONG_MAX;
4977 	int ret;
4978 
4979 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4980 
4981 	mutex_lock(&wl->mutex);
4982 
4983 	if (unlikely(wl->state != WLCORE_STATE_ON))
4984 		goto out;
4985 
4986 	ret = pm_runtime_get_sync(wl->dev);
4987 	if (ret < 0) {
4988 		pm_runtime_put_noidle(wl->dev);
4989 		goto out;
4990 	}
4991 
4992 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4993 	if (ret < 0)
4994 		goto out_sleep;
4995 
4996 out_sleep:
4997 	pm_runtime_mark_last_busy(wl->dev);
4998 	pm_runtime_put_autosuspend(wl->dev);
4999 
5000 out:
5001 	mutex_unlock(&wl->mutex);
5002 	return mactime;
5003 }
5004 
5005 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5006 				struct survey_info *survey)
5007 {
5008 	struct ieee80211_conf *conf = &hw->conf;
5009 
5010 	if (idx != 0)
5011 		return -ENOENT;
5012 
5013 	survey->channel = conf->chandef.chan;
5014 	survey->filled = 0;
5015 	return 0;
5016 }
5017 
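/*
 * Reserve an HLID for a newly added AP-mode station and restore its
 * saved total_freed_pkts counter, so the TX security sequence continues
 * where it left off after a recovery/resume.
 */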
5018 static int wl1271_allocate_sta(struct wl1271 *wl,
5019 			     struct wl12xx_vif *wlvif,
5020 			     struct ieee80211_sta *sta)
5021 {
5022 	struct wl1271_station *wl_sta;
5023 	int ret;
5024 
5025 
5026 	if (wl->active_sta_count >= wl->max_ap_stations) {
5027 		wl1271_warning("could not allocate HLID - too many stations");
5028 		return -EBUSY;
5029 	}
5030 
5031 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5032 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5033 	if (ret < 0) {
5034 		wl1271_warning("could not allocate HLID - too many links");
5035 		return -EBUSY;
5036 	}
5037 
5038 	/* use the previous security seq, if this is a recovery/resume */
5039 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5040 
5041 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5042 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5043 	wl->active_sta_count++;
5044 	return 0;
5045 }
5046 
5047 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5048 {
5049 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5050 		return;
5051 
5052 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5053 	__clear_bit(hlid, &wl->ap_ps_map);
5054 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5055 
5056 	/*
5057 	 * save the last used PN in the private part of ieee80211_sta,
5058 	 * in case of recovery/suspend
5059 	 */
5060 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5061 
5062 	wl12xx_free_link(wl, wlvif, &hlid);
5063 	wl->active_sta_count--;
5064 
5065 	/*
5066 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5067 	 * chance to return STA-buffered packets before complaining.
5068 	 */
5069 	if (wl->active_sta_count == 0)
5070 		wl12xx_rearm_tx_watchdog_locked(wl);
5071 }
5072 
5073 static int wl12xx_sta_add(struct wl1271 *wl,
5074 			  struct wl12xx_vif *wlvif,
5075 			  struct ieee80211_sta *sta)
5076 {
5077 	struct wl1271_station *wl_sta;
5078 	int ret = 0;
5079 	u8 hlid;
5080 
5081 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5082 
5083 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5084 	if (ret < 0)
5085 		return ret;
5086 
5087 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5088 	hlid = wl_sta->hlid;
5089 
5090 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5091 	if (ret < 0)
5092 		wl1271_free_sta(wl, wlvif, hlid);
5093 
5094 	return ret;
5095 }
5096 
5097 static int wl12xx_sta_remove(struct wl1271 *wl,
5098 			     struct wl12xx_vif *wlvif,
5099 			     struct ieee80211_sta *sta)
5100 {
5101 	struct wl1271_station *wl_sta;
5102 	int ret = 0, id;
5103 
5104 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5105 
5106 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5107 	id = wl_sta->hlid;
5108 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5109 		return -EINVAL;
5110 
5111 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5112 	if (ret < 0)
5113 		return ret;
5114 
5115 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5116 	return ret;
5117 }
5118 
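/*
 * Start a ROC on this role's channel, but only if no role is already in
 * ROC and this role's id is valid.
 */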
5119 static void wlcore_roc_if_possible(struct wl1271 *wl,
5120 				   struct wl12xx_vif *wlvif)
5121 {
5122 	if (find_first_bit(wl->roc_map,
5123 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5124 		return;
5125 
5126 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5127 		return;
5128 
5129 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5130 }
5131 
5132 /*
5133  * when wl_sta is NULL, we treat this call as if coming from a
5134  * pending auth reply.
5135  * wl->mutex must be taken and the FW must be awake when the call
5136  * takes place.
5137  */
5138 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5139 			      struct wl1271_station *wl_sta, bool in_conn)
5140 {
5141 	if (in_conn) {
5142 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5143 			return;
5144 
5145 		if (!wlvif->ap_pending_auth_reply &&
5146 		    !wlvif->inconn_count)
5147 			wlcore_roc_if_possible(wl, wlvif);
5148 
5149 		if (wl_sta) {
5150 			wl_sta->in_connection = true;
5151 			wlvif->inconn_count++;
5152 		} else {
5153 			wlvif->ap_pending_auth_reply = true;
5154 		}
5155 	} else {
5156 		if (wl_sta && !wl_sta->in_connection)
5157 			return;
5158 
5159 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5160 			return;
5161 
5162 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5163 			return;
5164 
5165 		if (wl_sta) {
5166 			wl_sta->in_connection = false;
5167 			wlvif->inconn_count--;
5168 		} else {
5169 			wlvif->ap_pending_auth_reply = false;
5170 		}
5171 
5172 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5173 		    test_bit(wlvif->role_id, wl->roc_map))
5174 			wl12xx_croc(wl, wlvif->role_id);
5175 	}
5176 }
5177 
5178 static int wl12xx_update_sta_state(struct wl1271 *wl,
5179 				   struct wl12xx_vif *wlvif,
5180 				   struct ieee80211_sta *sta,
5181 				   enum ieee80211_sta_state old_state,
5182 				   enum ieee80211_sta_state new_state)
5183 {
5184 	struct wl1271_station *wl_sta;
5185 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5186 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5187 	int ret;
5188 
5189 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5190 
5191 	/* Add station (AP mode) */
5192 	if (is_ap &&
5193 	    old_state == IEEE80211_STA_NOTEXIST &&
5194 	    new_state == IEEE80211_STA_NONE) {
5195 		ret = wl12xx_sta_add(wl, wlvif, sta);
5196 		if (ret)
5197 			return ret;
5198 
5199 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5200 	}
5201 
5202 	/* Remove station (AP mode) */
5203 	if (is_ap &&
5204 	    old_state == IEEE80211_STA_NONE &&
5205 	    new_state == IEEE80211_STA_NOTEXIST) {
5206 		/* must not fail */
5207 		wl12xx_sta_remove(wl, wlvif, sta);
5208 
5209 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5210 	}
5211 
5212 	/* Authorize station (AP mode) */
5213 	if (is_ap &&
5214 	    new_state == IEEE80211_STA_AUTHORIZED) {
5215 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5216 		if (ret < 0)
5217 			return ret;
5218 
5219 		/* reconfigure rates */
5220 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5221 		if (ret < 0)
5222 			return ret;
5223 
5224 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5225 						     wl_sta->hlid);
5226 		if (ret)
5227 			return ret;
5228 
5229 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5230 	}
5231 
5232 	/* Authorize station */
5233 	if (is_sta &&
5234 	    new_state == IEEE80211_STA_AUTHORIZED) {
5235 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5236 		ret = wl12xx_set_authorized(wl, wlvif);
5237 		if (ret)
5238 			return ret;
5239 	}
5240 
5241 	if (is_sta &&
5242 	    old_state == IEEE80211_STA_AUTHORIZED &&
5243 	    new_state == IEEE80211_STA_ASSOC) {
5244 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5245 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5246 	}
5247 
5248 	/* save seq number on disassoc (suspend) */
5249 	if (is_sta &&
5250 	    old_state == IEEE80211_STA_ASSOC &&
5251 	    new_state == IEEE80211_STA_AUTH) {
5252 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5253 		wlvif->total_freed_pkts = 0;
5254 	}
5255 
5256 	/* restore seq number on assoc (resume) */
5257 	if (is_sta &&
5258 	    old_state == IEEE80211_STA_AUTH &&
5259 	    new_state == IEEE80211_STA_ASSOC) {
5260 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5261 	}
5262 
5263 	/* clear ROCs on failure or authorization */
5264 	if (is_sta &&
5265 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5266 	     new_state == IEEE80211_STA_NOTEXIST)) {
5267 		if (test_bit(wlvif->role_id, wl->roc_map))
5268 			wl12xx_croc(wl, wlvif->role_id);
5269 	}
5270 
5271 	if (is_sta &&
5272 	    old_state == IEEE80211_STA_NOTEXIST &&
5273 	    new_state == IEEE80211_STA_NONE) {
5274 		if (find_first_bit(wl->roc_map,
5275 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5276 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5277 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5278 				   wlvif->band, wlvif->channel);
5279 		}
5280 	}
5281 	return 0;
5282 }
5283 
5284 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5285 			       struct ieee80211_vif *vif,
5286 			       struct ieee80211_sta *sta,
5287 			       enum ieee80211_sta_state old_state,
5288 			       enum ieee80211_sta_state new_state)
5289 {
5290 	struct wl1271 *wl = hw->priv;
5291 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5292 	int ret;
5293 
5294 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5295 		     sta->aid, old_state, new_state);
5296 
5297 	mutex_lock(&wl->mutex);
5298 
5299 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5300 		ret = -EBUSY;
5301 		goto out;
5302 	}
5303 
5304 	ret = pm_runtime_get_sync(wl->dev);
5305 	if (ret < 0) {
5306 		pm_runtime_put_noidle(wl->dev);
5307 		goto out;
5308 	}
5309 
5310 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5311 
5312 	pm_runtime_mark_last_busy(wl->dev);
5313 	pm_runtime_put_autosuspend(wl->dev);
5314 out:
5315 	mutex_unlock(&wl->mutex);
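	/*
	 * Only propagate errors for transitions to a higher state;
	 * failures while moving to a lower state (e.g. station removal)
	 * are reported to mac80211 as success.
	 */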
5316 	if (new_state < old_state)
5317 		return 0;
5318 	return ret;
5319 }
5320 
5321 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5322 				  struct ieee80211_vif *vif,
5323 				  struct ieee80211_ampdu_params *params)
5324 {
5325 	struct wl1271 *wl = hw->priv;
5326 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5327 	int ret;
5328 	u8 hlid, *ba_bitmap;
5329 	struct ieee80211_sta *sta = params->sta;
5330 	enum ieee80211_ampdu_mlme_action action = params->action;
5331 	u16 tid = params->tid;
5332 	u16 *ssn = &params->ssn;
5333 
5334 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5335 		     tid);
5336 
5337 	/* sanity check - the fields in FW are only 8 bits wide */
5338 	if (WARN_ON(tid > 0xFF))
5339 		return -ENOTSUPP;
5340 
5341 	mutex_lock(&wl->mutex);
5342 
5343 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5344 		ret = -EAGAIN;
5345 		goto out;
5346 	}
5347 
5348 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5349 		hlid = wlvif->sta.hlid;
5350 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5351 		struct wl1271_station *wl_sta;
5352 
5353 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5354 		hlid = wl_sta->hlid;
5355 	} else {
5356 		ret = -EINVAL;
5357 		goto out;
5358 	}
5359 
5360 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5361 
5362 	ret = pm_runtime_get_sync(wl->dev);
5363 	if (ret < 0) {
5364 		pm_runtime_put_noidle(wl->dev);
5365 		goto out;
5366 	}
5367 
5368 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5369 		     tid, action);
5370 
5371 	switch (action) {
5372 	case IEEE80211_AMPDU_RX_START:
5373 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5374 			ret = -ENOTSUPP;
5375 			break;
5376 		}
5377 
5378 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5379 			ret = -EBUSY;
5380 			wl1271_error("exceeded max RX BA sessions");
5381 			break;
5382 		}
5383 
5384 		if (*ba_bitmap & BIT(tid)) {
5385 			ret = -EINVAL;
5386 			wl1271_error("cannot enable RX BA session on active "
5387 				     "tid: %d", tid);
5388 			break;
5389 		}
5390 
5391 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5392 				hlid,
5393 				params->buf_size);
5394 
5395 		if (!ret) {
5396 			*ba_bitmap |= BIT(tid);
5397 			wl->ba_rx_session_count++;
5398 		}
5399 		break;
5400 
5401 	case IEEE80211_AMPDU_RX_STOP:
5402 		if (!(*ba_bitmap & BIT(tid))) {
5403 			/*
5404 			 * this happens on reconfig - so only output a debug
5405 			 * message for now, and don't fail the function.
5406 			 */
5407 			wl1271_debug(DEBUG_MAC80211,
5408 				     "no active RX BA session on tid: %d",
5409 				     tid);
5410 			ret = 0;
5411 			break;
5412 		}
5413 
5414 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5415 							 hlid, 0);
5416 		if (!ret) {
5417 			*ba_bitmap &= ~BIT(tid);
5418 			wl->ba_rx_session_count--;
5419 		}
5420 		break;
5421 
5422 	/*
5423 	 * The BA initiator (TX) sessions are managed by the FW
5424 	 * independently, so all TX AMPDU actions are rejected here.
5425 	 */
5426 	case IEEE80211_AMPDU_TX_START:
5427 	case IEEE80211_AMPDU_TX_STOP_CONT:
5428 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5429 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5430 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5431 		ret = -EINVAL;
5432 		break;
5433 
5434 	default:
5435 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5436 		ret = -EINVAL;
5437 	}
5438 
5439 	pm_runtime_mark_last_busy(wl->dev);
5440 	pm_runtime_put_autosuspend(wl->dev);
5441 
5442 out:
5443 	mutex_unlock(&wl->mutex);
5444 
5445 	return ret;
5446 }
5447 
5448 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5449 				   struct ieee80211_vif *vif,
5450 				   const struct cfg80211_bitrate_mask *mask)
5451 {
5452 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5453 	struct wl1271 *wl = hw->priv;
5454 	int i, ret = 0;
5455 
5456 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5457 		mask->control[NL80211_BAND_2GHZ].legacy,
5458 		mask->control[NL80211_BAND_5GHZ].legacy);
5459 
5460 	mutex_lock(&wl->mutex);
5461 
5462 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5463 		wlvif->bitrate_masks[i] =
5464 			wl1271_tx_enabled_rates_get(wl,
5465 						    mask->control[i].legacy,
5466 						    i);
5467 
5468 	if (unlikely(wl->state != WLCORE_STATE_ON))
5469 		goto out;
5470 
5471 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5472 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5473 
5474 		ret = pm_runtime_get_sync(wl->dev);
5475 		if (ret < 0) {
5476 			pm_runtime_put_noidle(wl->dev);
5477 			goto out;
5478 		}
5479 
5480 		wl1271_set_band_rate(wl, wlvif);
5481 		wlvif->basic_rate =
5482 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5483 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5484 
5485 		pm_runtime_mark_last_busy(wl->dev);
5486 		pm_runtime_put_autosuspend(wl->dev);
5487 	}
5488 out:
5489 	mutex_unlock(&wl->mutex);
5490 
5491 	return ret;
5492 }
5493 
5494 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5495 				     struct ieee80211_vif *vif,
5496 				     struct ieee80211_channel_switch *ch_switch)
5497 {
5498 	struct wl1271 *wl = hw->priv;
5499 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5500 	int ret;
5501 
5502 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5503 
5504 	wl1271_tx_flush(wl);
5505 
5506 	mutex_lock(&wl->mutex);
5507 
5508 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5509 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5510 			ieee80211_chswitch_done(vif, false);
5511 		goto out;
5512 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5513 		goto out;
5514 	}
5515 
5516 	ret = pm_runtime_get_sync(wl->dev);
5517 	if (ret < 0) {
5518 		pm_runtime_put_noidle(wl->dev);
5519 		goto out;
5520 	}
5521 
5522 	/* TODO: change mac80211 to pass vif as param */
5523 
5524 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5525 		unsigned long delay_usec;
5526 
5527 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5528 		if (ret)
5529 			goto out_sleep;
5530 
5531 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5532 
5533 		/* indicate failure 5 seconds after channel switch time */
5534 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5535 			ch_switch->count;
5536 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5537 					     usecs_to_jiffies(delay_usec) +
5538 					     msecs_to_jiffies(5000));
5539 	}
5540 
5541 out_sleep:
5542 	pm_runtime_mark_last_busy(wl->dev);
5543 	pm_runtime_put_autosuspend(wl->dev);
5544 
5545 out:
5546 	mutex_unlock(&wl->mutex);
5547 }
5548 
5549 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5550 					struct wl12xx_vif *wlvif,
5551 					u8 eid)
5552 {
5553 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5554 	struct sk_buff *beacon =
5555 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5556 
5557 	if (!beacon)
5558 		return NULL;
5559 
5560 	return cfg80211_find_ie(eid,
5561 				beacon->data + ieoffset,
5562 				beacon->len - ieoffset);
5563 }
5564 
5565 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5566 				u8 *csa_count)
5567 {
5568 	const u8 *ie;
5569 	const struct ieee80211_channel_sw_ie *ie_csa;
5570 
5571 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5572 	if (!ie)
5573 		return -EINVAL;
5574 
5575 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5576 	*csa_count = ie_csa->count;
5577 
5578 	return 0;
5579 }
5580 
5581 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5582 					    struct ieee80211_vif *vif,
5583 					    struct cfg80211_chan_def *chandef)
5584 {
5585 	struct wl1271 *wl = hw->priv;
5586 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5587 	struct ieee80211_channel_switch ch_switch = {
5588 		.block_tx = true,
5589 		.chandef = *chandef,
5590 	};
5591 	int ret;
5592 
5593 	wl1271_debug(DEBUG_MAC80211,
5594 		     "mac80211 channel switch beacon (role %d)",
5595 		     wlvif->role_id);
5596 
5597 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5598 	if (ret < 0) {
5599 		wl1271_error("error getting beacon (for CSA counter)");
5600 		return;
5601 	}
5602 
5603 	mutex_lock(&wl->mutex);
5604 
5605 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5606 		ret = -EBUSY;
5607 		goto out;
5608 	}
5609 
5610 	ret = pm_runtime_get_sync(wl->dev);
5611 	if (ret < 0) {
5612 		pm_runtime_put_noidle(wl->dev);
5613 		goto out;
5614 	}
5615 
5616 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5617 	if (ret)
5618 		goto out_sleep;
5619 
5620 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5621 
5622 out_sleep:
5623 	pm_runtime_mark_last_busy(wl->dev);
5624 	pm_runtime_put_autosuspend(wl->dev);
5625 out:
5626 	mutex_unlock(&wl->mutex);
5627 }
5628 
5629 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5630 			    u32 queues, bool drop)
5631 {
5632 	struct wl1271 *wl = hw->priv;
5633 
5634 	wl1271_tx_flush(wl);
5635 }
5636 
5637 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5638 				       struct ieee80211_vif *vif,
5639 				       struct ieee80211_channel *chan,
5640 				       int duration,
5641 				       enum ieee80211_roc_type type)
5642 {
5643 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5644 	struct wl1271 *wl = hw->priv;
5645 	int channel, active_roc, ret = 0;
5646 
5647 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5648 
5649 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5650 		     channel, wlvif->role_id);
5651 
5652 	mutex_lock(&wl->mutex);
5653 
5654 	if (unlikely(wl->state != WLCORE_STATE_ON))
5655 		goto out;
5656 
5657 	/* return EBUSY if we can't ROC right now */
5658 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
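	/* find_first_bit() returns WL12XX_MAX_ROLES when no role is currently ROCing */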
5659 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5660 		wl1271_warning("active roc on role %d", active_roc);
5661 		ret = -EBUSY;
5662 		goto out;
5663 	}
5664 
5665 	ret = pm_runtime_get_sync(wl->dev);
5666 	if (ret < 0) {
5667 		pm_runtime_put_noidle(wl->dev);
5668 		goto out;
5669 	}
5670 
5671 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5672 	if (ret < 0)
5673 		goto out_sleep;
5674 
5675 	wl->roc_vif = vif;
5676 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5677 				     msecs_to_jiffies(duration));
5678 out_sleep:
5679 	pm_runtime_mark_last_busy(wl->dev);
5680 	pm_runtime_put_autosuspend(wl->dev);
5681 out:
5682 	mutex_unlock(&wl->mutex);
5683 	return ret;
5684 }
5685 
5686 static int __wlcore_roc_completed(struct wl1271 *wl)
5687 {
5688 	struct wl12xx_vif *wlvif;
5689 	int ret;
5690 
5691 	/* already completed */
5692 	if (unlikely(!wl->roc_vif))
5693 		return 0;
5694 
5695 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5696 
5697 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5698 		return -EBUSY;
5699 
5700 	ret = wl12xx_stop_dev(wl, wlvif);
5701 	if (ret < 0)
5702 		return ret;
5703 
5704 	wl->roc_vif = NULL;
5705 
5706 	return 0;
5707 }
5708 
5709 static int wlcore_roc_completed(struct wl1271 *wl)
5710 {
5711 	int ret;
5712 
5713 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5714 
5715 	mutex_lock(&wl->mutex);
5716 
5717 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5718 		ret = -EBUSY;
5719 		goto out;
5720 	}
5721 
5722 	ret = pm_runtime_get_sync(wl->dev);
5723 	if (ret < 0) {
5724 		pm_runtime_put_noidle(wl->dev);
5725 		goto out;
5726 	}
5727 
5728 	ret = __wlcore_roc_completed(wl);
5729 
5730 	pm_runtime_mark_last_busy(wl->dev);
5731 	pm_runtime_put_autosuspend(wl->dev);
5732 out:
5733 	mutex_unlock(&wl->mutex);
5734 
5735 	return ret;
5736 }
5737 
5738 static void wlcore_roc_complete_work(struct work_struct *work)
5739 {
5740 	struct delayed_work *dwork;
5741 	struct wl1271 *wl;
5742 	int ret;
5743 
5744 	dwork = to_delayed_work(work);
5745 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5746 
5747 	ret = wlcore_roc_completed(wl);
5748 	if (!ret)
5749 		ieee80211_remain_on_channel_expired(wl->hw);
5750 }
5751 
5752 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5753 					      struct ieee80211_vif *vif)
5754 {
5755 	struct wl1271 *wl = hw->priv;
5756 
5757 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5758 
5759 	/* TODO: per-vif */
5760 	wl1271_tx_flush(wl);
5761 
5762 	/*
5763 	 * we can't just flush_work here, because it might deadlock
5764 	 * (as we might get called from the same workqueue)
5765 	 */
5766 	cancel_delayed_work_sync(&wl->roc_complete_work);
5767 	wlcore_roc_completed(wl);
5768 
5769 	return 0;
5770 }
5771 
5772 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5773 				    struct ieee80211_vif *vif,
5774 				    struct ieee80211_sta *sta,
5775 				    u32 changed)
5776 {
5777 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5778 
5779 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5780 
5781 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5782 		return;
5783 
5784 	/* this callback is atomic, so defer the update to a work item */
5785 	wlvif->rc_update_bw = sta->bandwidth;
5786 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5787 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5788 }
5789 
5790 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5791 				     struct ieee80211_vif *vif,
5792 				     struct ieee80211_sta *sta,
5793 				     struct station_info *sinfo)
5794 {
5795 	struct wl1271 *wl = hw->priv;
5796 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5797 	s8 rssi_dbm;
5798 	int ret;
5799 
5800 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5801 
5802 	mutex_lock(&wl->mutex);
5803 
5804 	if (unlikely(wl->state != WLCORE_STATE_ON))
5805 		goto out;
5806 
5807 	ret = pm_runtime_get_sync(wl->dev);
5808 	if (ret < 0) {
5809 		pm_runtime_put_noidle(wl->dev);
5810 		goto out;
5811 	}
5812 
5813 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5814 	if (ret < 0)
5815 		goto out_sleep;
5816 
5817 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5818 	sinfo->signal = rssi_dbm;
5819 
5820 out_sleep:
5821 	pm_runtime_mark_last_busy(wl->dev);
5822 	pm_runtime_put_autosuspend(wl->dev);
5823 
5824 out:
5825 	mutex_unlock(&wl->mutex);
5826 }
5827 
5828 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5829 					     struct ieee80211_sta *sta)
5830 {
5831 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5832 	struct wl1271 *wl = hw->priv;
5833 	u8 hlid = wl_sta->hlid;
5834 
5835 	/* return in units of Kbps */
5836 	return (wl->links[hlid].fw_rate_mbps * 1000);
5837 }
5838 
5839 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5840 {
5841 	struct wl1271 *wl = hw->priv;
5842 	bool ret = false;
5843 
5844 	mutex_lock(&wl->mutex);
5845 
5846 	if (unlikely(wl->state != WLCORE_STATE_ON))
5847 		goto out;
5848 
5849 	/* packets are considered pending if in the TX queue or the FW */
5850 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5851 out:
5852 	mutex_unlock(&wl->mutex);
5853 
5854 	return ret;
5855 }
5856 
5857 /* can't be const, mac80211 writes to this */
5858 static struct ieee80211_rate wl1271_rates[] = {
5859 	{ .bitrate = 10,
5860 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5861 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5862 	{ .bitrate = 20,
5863 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5864 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5865 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5866 	{ .bitrate = 55,
5867 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5868 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5869 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5870 	{ .bitrate = 110,
5871 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5872 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5873 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5874 	{ .bitrate = 60,
5875 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5876 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5877 	{ .bitrate = 90,
5878 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5879 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5880 	{ .bitrate = 120,
5881 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5882 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5883 	{ .bitrate = 180,
5884 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5885 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5886 	{ .bitrate = 240,
5887 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5888 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5889 	{ .bitrate = 360,
5890 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5891 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5892 	{ .bitrate = 480,
5893 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5894 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5895 	{ .bitrate = 540,
5896 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5897 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5898 };
5899 
5900 /* can't be const, mac80211 writes to this */
5901 static struct ieee80211_channel wl1271_channels[] = {
5902 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5903 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5904 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5905 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5906 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5907 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5908 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5909 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5910 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5911 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5912 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5913 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5914 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5915 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5916 };
5917 
5918 /* can't be const, mac80211 writes to this */
5919 static struct ieee80211_supported_band wl1271_band_2ghz = {
5920 	.channels = wl1271_channels,
5921 	.n_channels = ARRAY_SIZE(wl1271_channels),
5922 	.bitrates = wl1271_rates,
5923 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5924 };
5925 
5926 /* 5 GHz data rates for WL1273 */
5927 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5928 	{ .bitrate = 60,
5929 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5930 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5931 	{ .bitrate = 90,
5932 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5933 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5934 	{ .bitrate = 120,
5935 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5936 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5937 	{ .bitrate = 180,
5938 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5939 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5940 	{ .bitrate = 240,
5941 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5942 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5943 	{ .bitrate = 360,
5944 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5945 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5946 	{ .bitrate = 480,
5947 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5948 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5949 	{ .bitrate = 540,
5950 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5951 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5952 };
5953 
5954 /* 5 GHz band channels for WL1273 */
5955 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5956 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5957 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5958 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5959 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5960 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5961 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5962 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5963 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5964 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5965 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5966 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5967 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5968 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5969 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5970 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5971 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5972 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5973 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5974 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5975 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5987 };
5988 
5989 static struct ieee80211_supported_band wl1271_band_5ghz = {
5990 	.channels = wl1271_channels_5ghz,
5991 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5992 	.bitrates = wl1271_rates_5ghz,
5993 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5994 };
5995 
5996 static const struct ieee80211_ops wl1271_ops = {
5997 	.start = wl1271_op_start,
5998 	.stop = wlcore_op_stop,
5999 	.add_interface = wl1271_op_add_interface,
6000 	.remove_interface = wl1271_op_remove_interface,
6001 	.change_interface = wl12xx_op_change_interface,
6002 #ifdef CONFIG_PM
6003 	.suspend = wl1271_op_suspend,
6004 	.resume = wl1271_op_resume,
6005 #endif
6006 	.config = wl1271_op_config,
6007 	.prepare_multicast = wl1271_op_prepare_multicast,
6008 	.configure_filter = wl1271_op_configure_filter,
6009 	.tx = wl1271_op_tx,
6010 	.set_key = wlcore_op_set_key,
6011 	.hw_scan = wl1271_op_hw_scan,
6012 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6013 	.sched_scan_start = wl1271_op_sched_scan_start,
6014 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6015 	.bss_info_changed = wl1271_op_bss_info_changed,
6016 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6017 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6018 	.conf_tx = wl1271_op_conf_tx,
6019 	.get_tsf = wl1271_op_get_tsf,
6020 	.get_survey = wl1271_op_get_survey,
6021 	.sta_state = wl12xx_op_sta_state,
6022 	.ampdu_action = wl1271_op_ampdu_action,
6023 	.tx_frames_pending = wl1271_tx_frames_pending,
6024 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6025 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6026 	.channel_switch = wl12xx_op_channel_switch,
6027 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6028 	.flush = wlcore_op_flush,
6029 	.remain_on_channel = wlcore_op_remain_on_channel,
6030 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6031 	.add_chanctx = wlcore_op_add_chanctx,
6032 	.remove_chanctx = wlcore_op_remove_chanctx,
6033 	.change_chanctx = wlcore_op_change_chanctx,
6034 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6035 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6036 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6037 	.sta_rc_update = wlcore_op_sta_rc_update,
6038 	.sta_statistics = wlcore_op_sta_statistics,
6039 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6040 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6041 };
6042 
6043 
6044 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6045 {
6046 	u8 idx;
6047 
6048 	BUG_ON(band >= 2);
6049 
6050 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6051 		wl1271_error("Illegal RX rate from HW: %d", rate);
6052 		return 0;
6053 	}
6054 
6055 	idx = wl->band_rate_to_idx[band][rate];
6056 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6057 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6058 		return 0;
6059 	}
6060 
6061 	return idx;
6062 }
6063 
6064 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6065 {
6066 	int i;
6067 
6068 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6069 		     oui, nic);
6070 
6071 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6072 		wl1271_warning("NIC part of the MAC address wraps around!");
6073 
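	/* build the addresses from the same OUI, incrementing the NIC part for each one */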
6074 	for (i = 0; i < wl->num_mac_addr; i++) {
6075 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6076 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6077 		wl->addresses[i].addr[2] = (u8) oui;
6078 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6079 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6080 		wl->addresses[i].addr[5] = (u8) nic;
6081 		nic++;
6082 	}
6083 
6084 	/* we may be at most one address short */
6085 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6086 
6087 	/*
6088 	 * copy the first address into the last slot and turn on its LAA
6089 	 * bit, so that WLCORE_NUM_MAC_ADDRESSES addresses are still exposed.
6090 	 */
6091 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6092 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6093 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6094 		       sizeof(wl->addresses[0]));
6095 		/* LAA bit */
6096 		wl->addresses[idx].addr[0] |= BIT(1);
6097 	}
6098 
6099 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6100 	wl->hw->wiphy->addresses = wl->addresses;
6101 }
6102 
6103 static int wl12xx_get_hw_info(struct wl1271 *wl)
6104 {
6105 	int ret;
6106 
6107 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6108 	if (ret < 0)
6109 		goto out;
6110 
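	/*
	 * Start with a zeroed fuse address; the chip-specific get_mac()
	 * op below fills it in when the hardware provides one.
	 */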
6111 	wl->fuse_oui_addr = 0;
6112 	wl->fuse_nic_addr = 0;
6113 
6114 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6115 	if (ret < 0)
6116 		goto out;
6117 
6118 	if (wl->ops->get_mac)
6119 		ret = wl->ops->get_mac(wl);
6120 
6121 out:
6122 	return ret;
6123 }
6124 
6125 static int wl1271_register_hw(struct wl1271 *wl)
6126 {
6127 	int ret;
6128 	u32 oui_addr = 0, nic_addr = 0;
6129 	struct platform_device *pdev = wl->pdev;
6130 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6131 
6132 	if (wl->mac80211_registered)
6133 		return 0;
6134 
6135 	if (wl->nvs_len >= 12) {
6136 		/* NOTE: The wl->nvs->nvs element must be first, in
6137 		 * order to simplify the casting; we assume it is at
6138 		 * the beginning of the wl->nvs structure.
6139 		 */
6140 		u8 *nvs_ptr = (u8 *)wl->nvs;
6141 
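		/*
		 * The 24-bit OUI and NIC halves of the MAC address are spread
		 * over non-contiguous NVS bytes; reassemble them here.
		 */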
6142 		oui_addr =
6143 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6144 		nic_addr =
6145 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6146 	}
6147 
6148 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6149 	if (oui_addr == 0 && nic_addr == 0) {
6150 		oui_addr = wl->fuse_oui_addr;
6151 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6152 		nic_addr = wl->fuse_nic_addr + 1;
6153 	}
6154 
6155 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6156 		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6157 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6158 			wl1271_warning("This default nvs file can be removed from the file system");
6159 		} else {
6160 			wl1271_warning("Your device performance is not optimized.");
6161 			wl1271_warning("Please use the calibrator tool to configure your device.");
6162 		}
6163 
6164 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6165 			wl1271_warning("Fuse MAC address is zero, using a random MAC");
6166 			/* Use TI oui and a random nic */
6167 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6168 			nic_addr = get_random_int();
6169 		} else {
6170 			oui_addr = wl->fuse_oui_addr;
6171 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6172 			nic_addr = wl->fuse_nic_addr + 1;
6173 		}
6174 	}
6175 
6176 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6177 
6178 	ret = ieee80211_register_hw(wl->hw);
6179 	if (ret < 0) {
6180 		wl1271_error("unable to register mac80211 hw: %d", ret);
6181 		goto out;
6182 	}
6183 
6184 	wl->mac80211_registered = true;
6185 
6186 	wl1271_debugfs_init(wl);
6187 
6188 	wl1271_notice("loaded");
6189 
6190 out:
6191 	return ret;
6192 }
6193 
6194 static void wl1271_unregister_hw(struct wl1271 *wl)
6195 {
6196 	if (wl->plt)
6197 		wl1271_plt_stop(wl);
6198 
6199 	ieee80211_unregister_hw(wl->hw);
6200 	wl->mac80211_registered = false;
6201 
6202 }
6203 
6204 static int wl1271_init_ieee80211(struct wl1271 *wl)
6205 {
6206 	int i;
6207 	static const u32 cipher_suites[] = {
6208 		WLAN_CIPHER_SUITE_WEP40,
6209 		WLAN_CIPHER_SUITE_WEP104,
6210 		WLAN_CIPHER_SUITE_TKIP,
6211 		WLAN_CIPHER_SUITE_CCMP,
6212 		WL1271_CIPHER_SUITE_GEM,
6213 	};
6214 
6215 	/* The tx descriptor buffer */
6216 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6217 
6218 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6219 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6220 
6221 	/* unit us */
6222 	/* FIXME: find a proper value */
6223 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6224 
6225 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6226 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6227 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6228 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6229 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6230 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6231 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6232 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6233 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6234 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6235 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6236 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6237 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6238 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6239 
6240 	wl->hw->wiphy->cipher_suites = cipher_suites;
6241 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6242 
6243 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6244 					 BIT(NL80211_IFTYPE_AP) |
6245 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6246 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6247 #ifdef CONFIG_MAC80211_MESH
6248 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6249 #endif
6250 					 BIT(NL80211_IFTYPE_P2P_GO);
6251 
6252 	wl->hw->wiphy->max_scan_ssids = 1;
6253 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6254 	wl->hw->wiphy->max_match_sets = 16;
6255 	/*
6256 	 * Maximum length of elements in scanning probe request templates
6257 	 * should be the maximum length possible for a template, without
6258 	 * the IEEE80211 header of the template
6259 	 */
6260 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6261 			sizeof(struct ieee80211_header);
6262 
6263 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6264 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6265 		sizeof(struct ieee80211_header);
6266 
6267 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6268 
6269 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6270 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6271 				WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6272 
6273 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6274 
6275 	/* make sure all our channels fit in the scanned_ch bitmask */
6276 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6277 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6278 		     WL1271_MAX_CHANNELS);
6279 	/*
6280 	 * clear channel flags from the previous usage
6281 	 * and restore max_power & max_antenna_gain values.
6282 	 */
6283 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6284 		wl1271_band_2ghz.channels[i].flags = 0;
6285 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6286 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6287 	}
6288 
6289 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6290 		wl1271_band_5ghz.channels[i].flags = 0;
6291 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6292 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6293 	}
6294 
6295 	/*
6296 	 * We keep local copies of the band structs because we need to
6297 	 * modify them on a per-device basis.
6298 	 */
6299 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6300 	       sizeof(wl1271_band_2ghz));
6301 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6302 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6303 	       sizeof(*wl->ht_cap));
6304 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6305 	       sizeof(wl1271_band_5ghz));
6306 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6307 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6308 	       sizeof(*wl->ht_cap));
6309 
6310 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6311 		&wl->bands[NL80211_BAND_2GHZ];
6312 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6313 		&wl->bands[NL80211_BAND_5GHZ];
6314 
6315 	/*
6316 	 * allow 4 queues per mac address we support +
6317 	 * 1 cab queue per mac + one global offchannel Tx queue
6318 	 */
6319 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6320 
6321 	/* the last queue is the offchannel queue */
6322 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6323 	wl->hw->max_rates = 1;
6324 
6325 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6326 
6327 	/* the FW answers probe-requests in AP-mode */
6328 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6329 	wl->hw->wiphy->probe_resp_offload =
6330 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6331 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6332 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6333 
6334 	/* allowed interface combinations */
6335 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6336 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6337 
6338 	/* register vendor commands */
6339 	wlcore_set_vendor_commands(wl->hw->wiphy);
6340 
6341 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6342 
6343 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6344 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6345 
6346 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6347 
6348 	return 0;
6349 }
6350 
6351 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6352 				     u32 mbox_size)
6353 {
6354 	struct ieee80211_hw *hw;
6355 	struct wl1271 *wl;
6356 	int i, j, ret;
6357 	unsigned int order;
6358 
6359 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6360 	if (!hw) {
6361 		wl1271_error("could not alloc ieee80211_hw");
6362 		ret = -ENOMEM;
6363 		goto err_hw_alloc;
6364 	}
6365 
6366 	wl = hw->priv;
6367 	memset(wl, 0, sizeof(*wl));
6368 
6369 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6370 	if (!wl->priv) {
6371 		wl1271_error("could not alloc wl priv");
6372 		ret = -ENOMEM;
6373 		goto err_priv_alloc;
6374 	}
6375 
6376 	INIT_LIST_HEAD(&wl->wlvif_list);
6377 
6378 	wl->hw = hw;
6379 
6380 	/*
6381 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6382 	 * we don't allocate any additional resource here, so that's fine.
6383 	 */
6384 	for (i = 0; i < NUM_TX_QUEUES; i++)
6385 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6386 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6387 
6388 	skb_queue_head_init(&wl->deferred_rx_queue);
6389 	skb_queue_head_init(&wl->deferred_tx_queue);
6390 
6391 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6392 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6393 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6394 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6395 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6396 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6397 
6398 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6399 	if (!wl->freezable_wq) {
6400 		ret = -ENOMEM;
6401 		goto err_hw;
6402 	}
6403 
6404 	wl->channel = 0;
6405 	wl->rx_counter = 0;
6406 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6407 	wl->band = NL80211_BAND_2GHZ;
6408 	wl->channel_type = NL80211_CHAN_NO_HT;
6409 	wl->flags = 0;
6410 	wl->sg_enabled = true;
6411 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6412 	wl->recovery_count = 0;
6413 	wl->hw_pg_ver = -1;
6414 	wl->ap_ps_map = 0;
6415 	wl->ap_fw_ps_map = 0;
6416 	wl->quirks = 0;
6417 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6418 	wl->active_sta_count = 0;
6419 	wl->active_link_count = 0;
6420 	wl->fwlog_size = 0;
6421 
6422 	/* The system link is always allocated */
6423 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6424 
6425 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6426 	for (i = 0; i < wl->num_tx_desc; i++)
6427 		wl->tx_frames[i] = NULL;
6428 
6429 	spin_lock_init(&wl->wl_lock);
6430 
6431 	wl->state = WLCORE_STATE_OFF;
6432 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6433 	mutex_init(&wl->mutex);
6434 	mutex_init(&wl->flush_mutex);
6435 	init_completion(&wl->nvs_loading_complete);
6436 
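	/*
	 * The aggregation buffer is allocated as whole, physically
	 * contiguous pages covering the requested aggr_buf_size.
	 */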
6437 	order = get_order(aggr_buf_size);
6438 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6439 	if (!wl->aggr_buf) {
6440 		ret = -ENOMEM;
6441 		goto err_wq;
6442 	}
6443 	wl->aggr_buf_size = aggr_buf_size;
6444 
6445 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6446 	if (!wl->dummy_packet) {
6447 		ret = -ENOMEM;
6448 		goto err_aggr;
6449 	}
6450 
6451 	/* Allocate one page for the FW log */
6452 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6453 	if (!wl->fwlog) {
6454 		ret = -ENOMEM;
6455 		goto err_dummy_packet;
6456 	}
6457 
6458 	wl->mbox_size = mbox_size;
6459 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6460 	if (!wl->mbox) {
6461 		ret = -ENOMEM;
6462 		goto err_fwlog;
6463 	}
6464 
6465 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6466 	if (!wl->buffer_32) {
6467 		ret = -ENOMEM;
6468 		goto err_mbox;
6469 	}
6470 
6471 	return hw;
6472 
6473 err_mbox:
6474 	kfree(wl->mbox);
6475 
6476 err_fwlog:
6477 	free_page((unsigned long)wl->fwlog);
6478 
6479 err_dummy_packet:
6480 	dev_kfree_skb(wl->dummy_packet);
6481 
6482 err_aggr:
6483 	free_pages((unsigned long)wl->aggr_buf, order);
6484 
6485 err_wq:
6486 	destroy_workqueue(wl->freezable_wq);
6487 
6488 err_hw:
6489 	wl1271_debugfs_exit(wl);
6490 	kfree(wl->priv);
6491 
6492 err_priv_alloc:
6493 	ieee80211_free_hw(hw);
6494 
6495 err_hw_alloc:
6496 
6497 	return ERR_PTR(ret);
6498 }
6499 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6500 
6501 int wlcore_free_hw(struct wl1271 *wl)
6502 {
6503 	/* Unblock any fwlog readers */
6504 	mutex_lock(&wl->mutex);
6505 	wl->fwlog_size = -1;
6506 	mutex_unlock(&wl->mutex);
6507 
6508 	wlcore_sysfs_free(wl);
6509 
6510 	kfree(wl->buffer_32);
6511 	kfree(wl->mbox);
6512 	free_page((unsigned long)wl->fwlog);
6513 	dev_kfree_skb(wl->dummy_packet);
6514 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6515 
6516 	wl1271_debugfs_exit(wl);
6517 
6518 	vfree(wl->fw);
6519 	wl->fw = NULL;
6520 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6521 	kfree(wl->nvs);
6522 	wl->nvs = NULL;
6523 
6524 	kfree(wl->raw_fw_status);
6525 	kfree(wl->fw_status);
6526 	kfree(wl->tx_res_if);
6527 	destroy_workqueue(wl->freezable_wq);
6528 
6529 	kfree(wl->priv);
6530 	ieee80211_free_hw(wl->hw);
6531 
6532 	return 0;
6533 }
6534 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6535 
6536 #ifdef CONFIG_PM
6537 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6538 	.flags = WIPHY_WOWLAN_ANY,
6539 	.n_patterns = WL1271_MAX_RX_FILTERS,
6540 	.pattern_min_len = 1,
6541 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6542 };
6543 #endif
6544 
6545 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6546 {
6547 	return IRQ_WAKE_THREAD;
6548 }
6549 
6550 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6551 {
6552 	struct wl1271 *wl = context;
6553 	struct platform_device *pdev = wl->pdev;
6554 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6555 	struct resource *res;
6556 
6557 	int ret;
6558 	irq_handler_t hardirq_fn = NULL;
6559 
6560 	if (fw) {
6561 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6562 		if (!wl->nvs) {
6563 			wl1271_error("Could not allocate nvs data");
6564 			goto out;
6565 		}
6566 		wl->nvs_len = fw->size;
6567 	} else if (pdev_data->family->nvs_name) {
6568 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6569 			     pdev_data->family->nvs_name);
6570 		wl->nvs = NULL;
6571 		wl->nvs_len = 0;
6572 	} else {
6573 		wl->nvs = NULL;
6574 		wl->nvs_len = 0;
6575 	}
6576 
6577 	ret = wl->ops->setup(wl);
6578 	if (ret < 0)
6579 		goto out_free_nvs;
6580 
6581 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6582 
6583 	/* adjust some runtime configuration parameters */
6584 	wlcore_adjust_conf(wl);
6585 
6586 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6587 	if (!res) {
6588 		wl1271_error("Could not get IRQ resource");
6589 		goto out_free_nvs;
6590 	}
6591 
6592 	wl->irq = res->start;
6593 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6594 	wl->if_ops = pdev_data->if_ops;
6595 
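	/*
	 * Edge-triggered interrupts get a minimal primary handler that only
	 * wakes the IRQ thread; otherwise request a oneshot threaded IRQ so
	 * the (level-triggered) line stays masked until the thread has run.
	 */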
6596 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6597 		hardirq_fn = wlcore_hardirq;
6598 	else
6599 		wl->irq_flags |= IRQF_ONESHOT;
6600 
6601 	ret = wl12xx_set_power_on(wl);
6602 	if (ret < 0)
6603 		goto out_free_nvs;
6604 
6605 	ret = wl12xx_get_hw_info(wl);
6606 	if (ret < 0) {
6607 		wl1271_error("couldn't get hw info");
6608 		wl1271_power_off(wl);
6609 		goto out_free_nvs;
6610 	}
6611 
6612 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6613 				   wl->irq_flags, pdev->name, wl);
6614 	if (ret < 0) {
6615 		wl1271_error("interrupt configuration failed");
6616 		wl1271_power_off(wl);
6617 		goto out_free_nvs;
6618 	}
6619 
6620 #ifdef CONFIG_PM
6621 	device_init_wakeup(wl->dev, true);
6622 
6623 	ret = enable_irq_wake(wl->irq);
6624 	if (!ret) {
6625 		wl->irq_wake_enabled = true;
6626 		if (pdev_data->pwr_in_suspend)
6627 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6628 	}
6629 
6630 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6631 	if (res) {
6632 		wl->wakeirq = res->start;
6633 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6634 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6635 		if (ret)
6636 			wl->wakeirq = -ENODEV;
6637 	} else {
6638 		wl->wakeirq = -ENODEV;
6639 	}
6640 #endif
6641 	disable_irq(wl->irq);
6642 	wl1271_power_off(wl);
6643 
6644 	ret = wl->ops->identify_chip(wl);
6645 	if (ret < 0)
6646 		goto out_irq;
6647 
6648 	ret = wl1271_init_ieee80211(wl);
6649 	if (ret)
6650 		goto out_irq;
6651 
6652 	ret = wl1271_register_hw(wl);
6653 	if (ret)
6654 		goto out_irq;
6655 
6656 	ret = wlcore_sysfs_init(wl);
6657 	if (ret)
6658 		goto out_unreg;
6659 
6660 	wl->initialized = true;
6661 	goto out;
6662 
6663 out_unreg:
6664 	wl1271_unregister_hw(wl);
6665 
6666 out_irq:
6667 	if (wl->wakeirq >= 0)
6668 		dev_pm_clear_wake_irq(wl->dev);
6669 	device_init_wakeup(wl->dev, false);
6670 	free_irq(wl->irq, wl);
6671 
6672 out_free_nvs:
6673 	kfree(wl->nvs);
6674 
6675 out:
6676 	release_firmware(fw);
6677 	complete_all(&wl->nvs_loading_complete);
6678 }
6679 
6680 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6681 {
6682 	struct wl1271 *wl = dev_get_drvdata(dev);
6683 	struct wl12xx_vif *wlvif;
6684 	int error;
6685 
6686 	/* We do not enter elp sleep in PLT mode */
6687 	if (wl->plt)
6688 		return 0;
6689 
6690 	/* Nothing to do if no ELP mode requested */
6691 	if (wl->sleep_auth != WL1271_PSM_ELP)
6692 		return 0;
6693 
6694 	wl12xx_for_each_wlvif(wl, wlvif) {
6695 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6696 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6697 			return -EBUSY;
6698 	}
6699 
6700 	wl1271_debug(DEBUG_PSM, "chip to elp");
6701 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6702 	if (error < 0) {
6703 		wl12xx_queue_recovery_work(wl);
6704 
6705 		return error;
6706 	}
6707 
6708 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6709 
6710 	return 0;
6711 }
6712 
6713 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6714 {
6715 	struct wl1271 *wl = dev_get_drvdata(dev);
6716 	DECLARE_COMPLETION_ONSTACK(compl);
6717 	unsigned long flags;
6718 	int ret;
6719 	unsigned long start_time = jiffies;
6720 	bool pending = false;
6721 	bool recovery = false;
6722 
6723 	/* Nothing to do if no ELP mode requested */
6724 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6725 		return 0;
6726 
6727 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6728 
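	/*
	 * The wakeup is acknowledged by the IRQ handler completing
	 * wl->elp_compl.  If the IRQ handler is already running, skip the
	 * wait - the chip is being serviced anyway.
	 */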
6729 	spin_lock_irqsave(&wl->wl_lock, flags);
6730 	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6731 		pending = true;
6732 	else
6733 		wl->elp_compl = &compl;
6734 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6735 
6736 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6737 	if (ret < 0) {
6738 		recovery = true;
6739 		goto err;
6740 	}
6741 
6742 	if (!pending) {
6743 		ret = wait_for_completion_timeout(&compl,
6744 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6745 		if (ret == 0) {
6746 			wl1271_warning("ELP wakeup timeout!");
6747 
6748 			/* Report success to runtime PM; the recovery work handles the failure */
6749 			ret = 0;
6750 			recovery = true;
6751 			goto err;
6752 		}
6753 	}
6754 
6755 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6756 
6757 	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6758 		     jiffies_to_msecs(jiffies - start_time));
6759 
6760 	return 0;
6761 
6762 err:
6763 	spin_lock_irqsave(&wl->wl_lock, flags);
6764 	wl->elp_compl = NULL;
6765 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6766 
6767 	if (recovery) {
6768 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6769 		wl12xx_queue_recovery_work(wl);
6770 	}
6771 
6772 	return ret;
6773 }
6774 
6775 static const struct dev_pm_ops wlcore_pm_ops = {
6776 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6777 			   wlcore_runtime_resume,
6778 			   NULL)
6779 };
6780 
6781 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6782 {
6783 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6784 	const char *nvs_name;
6785 	int ret = 0;
6786 
6787 	if (!wl->ops || !wl->ptable || !pdev_data)
6788 		return -EINVAL;
6789 
6790 	wl->dev = &pdev->dev;
6791 	wl->pdev = pdev;
6792 	platform_set_drvdata(pdev, wl);
6793 
6794 	if (pdev_data->family && pdev_data->family->nvs_name) {
6795 		nvs_name = pdev_data->family->nvs_name;
6796 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6797 					      nvs_name, &pdev->dev, GFP_KERNEL,
6798 					      wl, wlcore_nvs_cb);
6799 		if (ret < 0) {
6800 			wl1271_error("request_firmware_nowait failed for %s: %d",
6801 				     nvs_name, ret);
6802 			complete_all(&wl->nvs_loading_complete);
6803 		}
6804 	} else {
6805 		wlcore_nvs_cb(NULL, wl);
6806 	}
6807 
6808 	wl->dev->driver->pm = &wlcore_pm_ops;
6809 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6810 	pm_runtime_use_autosuspend(wl->dev);
6811 	pm_runtime_enable(wl->dev);
6812 
6813 	return ret;
6814 }
6815 EXPORT_SYMBOL_GPL(wlcore_probe);
6816 
6817 int wlcore_remove(struct platform_device *pdev)
6818 {
6819 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6820 	struct wl1271 *wl = platform_get_drvdata(pdev);
6821 	int error;
6822 
6823 	error = pm_runtime_get_sync(wl->dev);
6824 	if (error < 0)
6825 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6826 
6827 	wl->dev->driver->pm = NULL;
6828 
6829 	if (pdev_data->family && pdev_data->family->nvs_name)
6830 		wait_for_completion(&wl->nvs_loading_complete);
6831 	if (!wl->initialized)
6832 		return 0;
6833 
6834 	if (wl->wakeirq >= 0) {
6835 		dev_pm_clear_wake_irq(wl->dev);
6836 		wl->wakeirq = -ENODEV;
6837 	}
6838 
6839 	device_init_wakeup(wl->dev, false);
6840 
6841 	if (wl->irq_wake_enabled)
6842 		disable_irq_wake(wl->irq);
6843 
6844 	wl1271_unregister_hw(wl);
6845 
6846 	pm_runtime_put_sync(wl->dev);
6847 	pm_runtime_dont_use_autosuspend(wl->dev);
6848 	pm_runtime_disable(wl->dev);
6849 
6850 	free_irq(wl->irq, wl);
6851 	wlcore_free_hw(wl);
6852 
6853 	return 0;
6854 }
6855 EXPORT_SYMBOL_GPL(wlcore_remove);
6856 
6857 u32 wl12xx_debug_level = DEBUG_NONE;
6858 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6859 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6860 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6861 
6862 module_param_named(fwlog, fwlog_param, charp, 0);
6863 MODULE_PARM_DESC(fwlog,
6864 		 "FW logger options: continuous, dbgpins or disable");
6865 
6866 module_param(fwlog_mem_blocks, int, 0600);
6867 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6868 
6869 module_param(bug_on_recovery, int, 0600);
6870 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6871 
6872 module_param(no_recovery, int, 0600);
6873 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6874 
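/*
 * Example usage (assuming the driver is built as wlcore.ko):
 *   modprobe wlcore debug_level=0x1 no_recovery=1
 * Parameters declared with mode 0600 can also be adjusted at runtime via
 * /sys/module/wlcore/parameters/.
 */
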
6875 MODULE_LICENSE("GPL");
6876 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6877 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6878