// SPDX-License-Identifier: GPL-2.0-only
/*
 * Interrupt bottom half (BH).
 *
 * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/gpio/consumer.h>
#include <net/mac80211.h>

#include "bh.h"
#include "wfx.h"
#include "hwio.h"
#include "traces.h"
#include "hif_rx.h"
#include "hif_api_cmd.h"

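/* Assert the wake-up GPIO (if any) and wait until the chip signals that it is
 * awake.
 */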
static void device_wakeup(struct wfx_dev *wdev)
{
	int max_retry = 3;

	if (!wdev->pdata.gpio_wakeup)
		return;
	if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0)
		return;

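	// With firmware older than API 1.4, just assert the GPIO and allow
	// the chip a fixed delay to wake up.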
	if (wfx_api_older_than(wdev, 1, 4)) {
		gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
		if (!completion_done(&wdev->hif.ctrl_ready))
			usleep_range(2000, 2500);
		return;
	}
	for (;;) {
		gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
		// completion.h does not provide any function to wait for a
		// completion without consuming it (a kind of
		// wait_for_completion_done_timeout()), so emulate it here.
		if (wait_for_completion_timeout(&wdev->hif.ctrl_ready,
						msecs_to_jiffies(2))) {
			complete(&wdev->hif.ctrl_ready);
			return;
		} else if (max_retry-- > 0) {
			// Older firmware has a race in the sleep/wake-up
			// process. Redoing the process is enough to unfreeze
			// the chip.
			dev_err(wdev->dev, "timeout while waking up chip\n");
			gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
			usleep_range(2000, 2500);
		} else {
			dev_err(wdev->dev, "max wake-up retries reached\n");
			return;
		}
	}
}

static void device_release(struct wfx_dev *wdev)
{
	if (!wdev->pdata.gpio_wakeup)
		return;

	gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
}

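/* Read one HIF message of read_len bytes from the chip and dispatch it.
 * Returns the control register value piggybacked at the end of the transfer
 * (so the caller can chain reads without extra register accesses), or a
 * negative error code.
 */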
static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
{
	struct sk_buff *skb;
	struct hif_msg *hif;
	size_t alloc_len;
	size_t computed_len;
	int release_count;
	int piggyback = 0;

	WARN(read_len > round_down(0xFFF, 2) * sizeof(u16),
	     "%s: request exceeds WFx capability", __func__);

	// Add 2 to take into account the piggyback size
	alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2);
	skb = dev_alloc_skb(alloc_len);
	if (!skb)
		return -ENOMEM;

	if (wfx_data_read(wdev, skb->data, alloc_len))
		goto err;

	piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2));
	_trace_piggyback(piggyback, false);

	hif = (struct hif_msg *)skb->data;
	WARN(hif->encrypted & 0x3, "encryption is unsupported");
	if (WARN(read_len < sizeof(struct hif_msg), "corrupted read"))
		goto err;
	computed_len = le16_to_cpu(hif->len);
	computed_len = round_up(computed_len, 2);
	if (computed_len != read_len) {
		dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
			computed_len, read_len);
		print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1,
			       hif, read_len, true);
		goto err;
	}

	if (!(hif->id & HIF_ID_IS_INDICATION)) {
		(*is_cnf)++;
		if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
			release_count = ((struct hif_cnf_multi_transmit *)hif->body)->num_tx_confs;
		else
			release_count = 1;
		WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
		wdev->hif.tx_buffers_used -= release_count;
	}
	_trace_hif_recv(hif, wdev->hif.tx_buffers_used);

	if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) {
		if (hif->seqnum != wdev->hif.rx_seqnum)
			dev_warn(wdev->dev, "wrong message sequence: %d != %d\n",
				 hif->seqnum, wdev->hif.rx_seqnum);
		wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
	}

	skb_put(skb, le16_to_cpu(hif->len));
	// wfx_handle_rx takes care of the SKB lifetime
	wfx_handle_rx(wdev, skb);
	if (!wdev->hif.tx_buffers_used)
		wake_up(&wdev->hif.tx_buffers_empty);

	return piggyback;

err:
	if (skb)
		dev_kfree_skb(skb);
	return -EIO;
}

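/* Receive up to max_msg messages. When the previous transfer piggybacked the
 * length of the next message, use it directly instead of reading the control
 * register again. Returns the number of messages received.
 */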
static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf)
{
	size_t len;
	int i;
	int ctrl_reg, piggyback;

	piggyback = 0;
	for (i = 0; i < max_msg; i++) {
		if (piggyback & CTRL_NEXT_LEN_MASK)
			ctrl_reg = piggyback;
		else if (try_wait_for_completion(&wdev->hif.ctrl_ready))
			ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
		else
			ctrl_reg = 0;
		if (!(ctrl_reg & CTRL_NEXT_LEN_MASK))
			return i;
		// ctrl_reg is expressed in 16-bit words
		len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
		piggyback = rx_helper(wdev, len, num_cnf);
		if (piggyback < 0)
			return i;
		if (!(piggyback & CTRL_WLAN_READY))
			dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n",
				piggyback);
	}
	if (piggyback & CTRL_NEXT_LEN_MASK) {
		ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
		complete(&wdev->hif.ctrl_ready);
		if (ctrl_reg)
			dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n",
				ctrl_reg, piggyback);
	}
	return i;
}

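/* Send one HIF message to the chip and account for the hardware input buffer
 * it occupies.
 */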
static void tx_helper(struct wfx_dev *wdev, struct hif_msg *hif)
{
	int ret;
	void *data;
	bool is_encrypted = false;
	size_t len = le16_to_cpu(hif->len);

	WARN(len < sizeof(*hif), "trying to send corrupted data");

	hif->seqnum = wdev->hif.tx_seqnum;
	wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);

	data = hif;
	WARN(len > wdev->hw_caps.size_inp_ch_buf,
	     "%s: request exceeds WFx capability: %zu > %d\n", __func__,
	     len, wdev->hw_caps.size_inp_ch_buf);
	len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len);
	ret = wfx_data_write(wdev, data, len);
	if (ret)
		goto end;

	wdev->hif.tx_buffers_used++;
	_trace_hif_send(hif, wdev->hif.tx_buffers_used);
end:
	if (is_encrypted)
		kfree(data);
}

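/* Send up to max_msg pending messages while the chip still has free input
 * buffers. Commands waiting in hif_cmd take precedence over frames from the
 * Tx queues. Returns the number of messages sent.
 */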
static int bh_work_tx(struct wfx_dev *wdev, int max_msg)
{
	struct hif_msg *hif;
	int i;

	for (i = 0; i < max_msg; i++) {
		hif = NULL;
		if (wdev->hif.tx_buffers_used < wdev->hw_caps.num_inp_ch_bufs) {
			if (try_wait_for_completion(&wdev->hif_cmd.ready)) {
				WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
				hif = wdev->hif_cmd.buf_send;
			} else {
				hif = wfx_tx_queues_get(wdev);
			}
		}
		if (!hif)
			return i;
		tx_helper(wdev, hif);
	}
	return i;
}

/* In SDIO mode, it is necessary to make an access to a register to
 * acknowledge the last received message. It could be possible to restrict
 * this acknowledgment to SDIO mode and only when the last operation was an RX.
 */
static void ack_sdio_data(struct wfx_dev *wdev)
{
	u32 cfg_reg;

	config_reg_read(wdev, &cfg_reg);
	if (cfg_reg & 0xFF) {
		dev_warn(wdev->dev, "chip reports errors: %02x\n",
			 cfg_reg & 0xFF);
		config_reg_write_bits(wdev, 0xFF, 0x00);
	}
}

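/* Main bottom half: wake the chip up, exchange messages in both directions
 * until there is nothing left to do, then let the chip go back to sleep if no
 * Tx buffers are in use.
 */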
static void bh_work(struct work_struct *work)
{
	struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
	int stats_req = 0, stats_cnf = 0, stats_ind = 0;
	bool release_chip = false, last_op_is_rx = false;
	int num_tx, num_rx;

	device_wakeup(wdev);
	do {
		num_tx = bh_work_tx(wdev, 32);
		stats_req += num_tx;
		if (num_tx)
			last_op_is_rx = false;
		num_rx = bh_work_rx(wdev, 32, &stats_cnf);
		stats_ind += num_rx;
		if (num_rx)
			last_op_is_rx = true;
	} while (num_rx || num_tx);
	stats_ind -= stats_cnf;

	if (last_op_is_rx)
		ack_sdio_data(wdev);
	if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
		device_release(wdev);
		release_chip = true;
	}
	_trace_bh_stats(stats_ind, stats_req, stats_cnf,
			wdev->hif.tx_buffers_used, release_chip);
}

/*
 * An IRQ from the chip has occurred
 */
void wfx_bh_request_rx(struct wfx_dev *wdev)
{
	u32 cur, prev;

	control_reg_read(wdev, &cur);
	prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
	complete(&wdev->hif.ctrl_ready);
	queue_work(system_highpri_wq, &wdev->hif.bh);

	if (!(cur & CTRL_NEXT_LEN_MASK))
		dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n",
			cur);
	if (prev != 0)
		dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n",
			prev, cur);
}

/*
 * The driver wants to send data
 */
void wfx_bh_request_tx(struct wfx_dev *wdev)
{
	queue_work(system_highpri_wq, &wdev->hif.bh);
}

/*
 * If the IRQ is not available, this function allows the control register to
 * be polled manually and simulates an IRQ when an event has happened.
 *
 * Note that the device has a bug: if an IRQ is raised while the host reads the
 * control register, the IRQ is lost. So, use this function carefully (only
 * during device initialisation).
 */
void wfx_bh_poll_irq(struct wfx_dev *wdev)
{
	ktime_t now, start;
	u32 reg;

	WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ");
	start = ktime_get();
	for (;;) {
		control_reg_read(wdev, &reg);
		now = ktime_get();
		if (reg & 0xFFF)
			break;
		if (ktime_after(now, ktime_add_ms(start, 1000))) {
			dev_err(wdev->dev, "time out while polling control register\n");
			return;
		}
		udelay(200);
	}
	wfx_bh_request_rx(wdev);
}

void wfx_bh_register(struct wfx_dev *wdev)
{
	INIT_WORK(&wdev->hif.bh, bh_work);
	init_completion(&wdev->hif.ctrl_ready);
	init_waitqueue_head(&wdev->hif.tx_buffers_empty);
}

void wfx_bh_unregister(struct wfx_dev *wdev)
{
	flush_work(&wdev->hif.bh);
}