// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Diolan DLN-2 USB adapter
 *
 * Copyright (c) 2014 Intel Corporation
 *
 * Derived from:
 *  i2c-diolan-u2c.c
 *  Copyright (c) 2010-2011 Ericsson AB
 */
11 
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/usb.h>
17 #include <linux/i2c.h>
18 #include <linux/mutex.h>
19 #include <linux/platform_device.h>
20 #include <linux/mfd/core.h>
21 #include <linux/mfd/dln2.h>
22 #include <linux/rculist.h>
23 
/*
 * Common header prepended to every message exchanged with the DLN2 adapter.
 * All fields are little-endian on the wire.
 */
struct dln2_header {
	__le16 size;	/* total message size, header included */
	__le16 id;	/* command/event identifier */
	__le16 echo;	/* echoed in the response; used as the RX slot index */
	__le16 handle;	/* destination module (see enum dln2_handle) */
};
30 
/*
 * Response layout: common header followed by a 16-bit result code. Any
 * payload data follows this structure (see _dln2_transfer).
 */
struct dln2_response {
	struct dln2_header hdr;
	__le16 result;	/* values > 0x80 indicate a device-side error */
};
35 
/* Generic (device-level) module and its commands */
#define DLN2_GENERIC_MODULE_ID		0x00
#define DLN2_GENERIC_CMD(cmd)		DLN2_CMD(cmd, DLN2_GENERIC_MODULE_ID)
#define CMD_GET_DEVICE_VER		DLN2_GENERIC_CMD(0x30)
#define CMD_GET_DEVICE_SN		DLN2_GENERIC_CMD(0x31)

#define DLN2_HW_ID			0x200	/* expected CMD_GET_DEVICE_VER reply */
#define DLN2_USB_TIMEOUT		200	/* in ms */
#define DLN2_MAX_RX_SLOTS		16
#define DLN2_MAX_URBS			16
#define DLN2_RX_BUF_SIZE		512
46 
/* Module handles carried in the header handle field to route messages */
enum dln2_handle {
	DLN2_HANDLE_EVENT = 0,		/* don't change, hardware defined */
	DLN2_HANDLE_CTRL,
	DLN2_HANDLE_GPIO,
	DLN2_HANDLE_I2C,
	DLN2_HANDLE_SPI,
	DLN2_HANDLES
};
55 
/*
 * Receive context used between the receive demultiplexer and the transfer
 * routine. While sending a request the transfer routine will look for a free
 * receive context and use it to wait for a response and to receive the URB and
 * thus the response data.
 */
struct dln2_rx_context {
	/* completion used to wait for a response */
	struct completion done;

	/* if non-NULL the URB contains the response */
	struct urb *urb;

	/* if true then this context is used to wait for a response */
	bool in_use;
};
72 
/*
 * Receive contexts for a particular DLN2 module (i2c, gpio, etc.). We use the
 * handle header field to identify the module in dln2_dev.mod_rx_slots and then
 * the echo header field to index the slots field and find the receive context
 * for a particular request.
 */
struct dln2_mod_rx_slots {
	/* RX slots bitmap */
	DECLARE_BITMAP(bmap, DLN2_MAX_RX_SLOTS);

	/* used to wait for a free RX slot */
	wait_queue_head_t wq;

	/* used to wait for an RX operation to complete */
	struct dln2_rx_context slots[DLN2_MAX_RX_SLOTS];

	/* avoid races between alloc/free_rx_slot and dln2_rx_transfer */
	spinlock_t lock;
};
92 
/* Indices of the bulk endpoints within the interface's endpoint array */
enum dln2_endpoint {
	DLN2_EP_OUT	= 0,
	DLN2_EP_IN	= 1,
};
97 
/* Per-device state for one DLN2 adapter */
struct dln2_dev {
	struct usb_device *usb_dev;
	struct usb_interface *interface;
	u8 ep_in;			/* bulk IN endpoint address */
	u8 ep_out;			/* bulk OUT endpoint address */

	/* pre-allocated RX URBs and their buffers, resubmitted from dln2_rx */
	struct urb *rx_urb[DLN2_MAX_URBS];
	void *rx_buf[DLN2_MAX_URBS];

	/* per-module receive slots, indexed by the message handle */
	struct dln2_mod_rx_slots mod_rx_slots[DLN2_HANDLES];

	/* registered event callbacks; protected by event_cb_lock and RCU */
	struct list_head event_cb_list;
	spinlock_t event_cb_lock;

	/* disconnect handling: refuse new transfers and drain active ones */
	bool disconnect;
	int active_transfers;
	wait_queue_head_t disconnect_wq;
	spinlock_t disconnect_lock;
};
117 
/* One registered event callback, linked into dln2_dev.event_cb_list */
struct dln2_event_cb_entry {
	struct list_head list;
	u16 id;				/* event id this callback handles */
	struct platform_device *pdev;
	dln2_event_cb_t callback;
};
124 
/*
 * Register a callback for unsolicited events carrying the given id. Only one
 * callback per event id is allowed. Returns 0 on success, -ENOMEM on
 * allocation failure or -EBUSY if the id already has a callback registered.
 */
int dln2_register_event_cb(struct platform_device *pdev, u16 id,
			   dln2_event_cb_t event_cb)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *entry, *pos;
	unsigned long flags;

	/* allocate outside the spinlock since GFP_KERNEL may sleep */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = id;
	entry->callback = event_cb;
	entry->pdev = pdev;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	/* reject duplicate registrations for the same event id */
	list_for_each_entry(pos, &dln2->event_cb_list, list) {
		if (pos->id == id) {
			spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
			kfree(entry);
			return -EBUSY;
		}
	}

	list_add_rcu(&entry->list, &dln2->event_cb_list);

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	return 0;
}
EXPORT_SYMBOL(dln2_register_event_cb);
161 
/*
 * Remove the event callback registered for the given id, if any. Waits for
 * a grace period so concurrent RCU readers (dln2_run_event_callbacks) are
 * done with the entry before it is freed.
 */
void dln2_unregister_event_cb(struct platform_device *pdev, u16 id)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_event_cb_entry *entry, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dln2->event_cb_lock, flags);

	list_for_each_entry(entry, &dln2->event_cb_list, list) {
		if (entry->id == id) {
			list_del_rcu(&entry->list);
			found = entry;
			break;
		}
	}

	spin_unlock_irqrestore(&dln2->event_cb_lock, flags);

	if (found) {
		synchronize_rcu();
		kfree(found);
	}
}
EXPORT_SYMBOL(dln2_unregister_event_cb);
187 
188 /*
189  * Returns true if a valid transfer slot is found. In this case the URB must not
190  * be resubmitted immediately in dln2_rx as we need the data when dln2_transfer
191  * is woke up. It will be resubmitted there.
192  */
dln2_transfer_complete(struct dln2_dev * dln2,struct urb * urb,u16 handle,u16 rx_slot)193 static bool dln2_transfer_complete(struct dln2_dev *dln2, struct urb *urb,
194 				   u16 handle, u16 rx_slot)
195 {
196 	struct device *dev = &dln2->interface->dev;
197 	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
198 	struct dln2_rx_context *rxc;
199 	unsigned long flags;
200 	bool valid_slot = false;
201 
202 	if (rx_slot >= DLN2_MAX_RX_SLOTS)
203 		goto out;
204 
205 	rxc = &rxs->slots[rx_slot];
206 
207 	spin_lock_irqsave(&rxs->lock, flags);
208 	if (rxc->in_use && !rxc->urb) {
209 		rxc->urb = urb;
210 		complete(&rxc->done);
211 		valid_slot = true;
212 	}
213 	spin_unlock_irqrestore(&rxs->lock, flags);
214 
215 out:
216 	if (!valid_slot)
217 		dev_warn(dev, "bad/late response %d/%d\n", handle, rx_slot);
218 
219 	return valid_slot;
220 }
221 
/*
 * Invoke the callback registered for the given event id, if one exists.
 * Walks the callback list under the RCU read lock.
 */
static void dln2_run_event_callbacks(struct dln2_dev *dln2, u16 id, u16 echo,
				     void *data, int len)
{
	struct dln2_event_cb_entry *entry;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &dln2->event_cb_list, list) {
		if (entry->id != id)
			continue;
		entry->callback(entry->pdev, echo, data, len);
		break;
	}

	rcu_read_unlock();
}
238 
/*
 * Completion handler for the RX bulk URBs. Validates the message header and
 * demultiplexes it: events are dispatched to registered callbacks, responses
 * are handed to the transfer waiting on the matching (handle, echo) slot.
 * The URB is resubmitted here unless a waiter claimed it (see
 * dln2_transfer_complete) or it was terminated.
 */
static void dln2_rx(struct urb *urb)
{
	struct dln2_dev *dln2 = urb->context;
	struct dln2_header *hdr = urb->transfer_buffer;
	struct device *dev = &dln2->interface->dev;
	u16 id, echo, handle, size;
	u8 *data;
	int len;
	int err;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EPIPE:
		/* this urb is terminated, clean up */
		dev_dbg(dev, "urb shutting down with status %d\n", urb->status);
		return;
	default:
		dev_dbg(dev, "nonzero urb status received %d\n", urb->status);
		goto out;
	}

	/* need at least a full header before touching its fields */
	if (urb->actual_length < sizeof(struct dln2_header)) {
		dev_err(dev, "short response: %d\n", urb->actual_length);
		goto out;
	}

	handle = le16_to_cpu(hdr->handle);
	id = le16_to_cpu(hdr->id);
	echo = le16_to_cpu(hdr->echo);
	size = le16_to_cpu(hdr->size);

	/* the advertised size must match what was actually received */
	if (size != urb->actual_length) {
		dev_err(dev, "size mismatch: handle %x cmd %x echo %x size %d actual %d\n",
			handle, id, echo, size, urb->actual_length);
		goto out;
	}

	if (handle >= DLN2_HANDLES) {
		dev_warn(dev, "invalid handle %d\n", handle);
		goto out;
	}

	data = urb->transfer_buffer + sizeof(struct dln2_header);
	len = urb->actual_length - sizeof(struct dln2_header);

	if (handle == DLN2_HANDLE_EVENT) {
		unsigned long flags;

		/*
		 * Hold event_cb_lock while running the callback so it cannot
		 * race with dln2_register/unregister_event_cb.
		 */
		spin_lock_irqsave(&dln2->event_cb_lock, flags);
		dln2_run_event_callbacks(dln2, id, echo, data, len);
		spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
	} else {
		/* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
		if (dln2_transfer_complete(dln2, urb, handle, echo))
			return;
	}

out:
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0)
		dev_err(dev, "failed to resubmit RX URB: %d\n", err);
}
306 
/*
 * Build a complete outgoing message: a dln2_header followed by a copy of
 * obuf. On success *obuf_len is updated to the total message length and the
 * kmalloc'ed buffer is returned (caller frees); NULL on allocation failure.
 */
static void *dln2_prep_buf(u16 handle, u16 cmd, u16 echo, const void *obuf,
			   int *obuf_len, gfp_t gfp)
{
	struct dln2_header *hdr;
	int total = *obuf_len + sizeof(*hdr);

	hdr = kmalloc(total, gfp);
	if (!hdr)
		return NULL;

	hdr->id = cpu_to_le16(cmd);
	hdr->size = cpu_to_le16(total);
	hdr->echo = cpu_to_le16(echo);
	hdr->handle = cpu_to_le16(handle);

	/* payload goes right after the header */
	memcpy(hdr + 1, obuf, *obuf_len);

	*obuf_len = total;

	return hdr;
}
331 
/*
 * Synchronously send one request on the bulk OUT endpoint. The echo value
 * is the RX slot the response will be matched against. Returns 0 on success
 * or a negative errno.
 */
static int dln2_send_wait(struct dln2_dev *dln2, u16 handle, u16 cmd, u16 echo,
			  const void *obuf, int obuf_len)
{
	void *msg;
	int msg_len = obuf_len;
	int actual;
	int ret;

	msg = dln2_prep_buf(handle, cmd, echo, obuf, &msg_len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = usb_bulk_msg(dln2->usb_dev,
			   usb_sndbulkpipe(dln2->usb_dev, dln2->ep_out),
			   msg, msg_len, &actual, DLN2_USB_TIMEOUT);

	kfree(msg);

	return ret;
}
352 
find_free_slot(struct dln2_dev * dln2,u16 handle,int * slot)353 static bool find_free_slot(struct dln2_dev *dln2, u16 handle, int *slot)
354 {
355 	struct dln2_mod_rx_slots *rxs;
356 	unsigned long flags;
357 
358 	if (dln2->disconnect) {
359 		*slot = -ENODEV;
360 		return true;
361 	}
362 
363 	rxs = &dln2->mod_rx_slots[handle];
364 
365 	spin_lock_irqsave(&rxs->lock, flags);
366 
367 	*slot = find_first_zero_bit(rxs->bmap, DLN2_MAX_RX_SLOTS);
368 
369 	if (*slot < DLN2_MAX_RX_SLOTS) {
370 		struct dln2_rx_context *rxc = &rxs->slots[*slot];
371 
372 		set_bit(*slot, rxs->bmap);
373 		rxc->in_use = true;
374 	}
375 
376 	spin_unlock_irqrestore(&rxs->lock, flags);
377 
378 	return *slot < DLN2_MAX_RX_SLOTS;
379 }
380 
/*
 * Claim an RX slot for the given handle, sleeping until one is available.
 * Returns the slot index, -ENODEV on disconnect or -ERESTARTSYS if
 * interrupted by a signal.
 */
static int alloc_rx_slot(struct dln2_dev *dln2, u16 handle)
{
	int slot;
	int ret;

	/*
	 * No need to timeout here, the wait is bounded by the timeout in
	 * _dln2_transfer.
	 */
	ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq,
				       find_free_slot(dln2, handle, &slot));

	return ret < 0 ? ret : slot;
}
397 
/*
 * Release an RX slot claimed by alloc_rx_slot. If dln2_rx parked a response
 * URB in the slot it is resubmitted here, and any task waiting for a free
 * slot is woken up.
 */
static void free_rx_slot(struct dln2_dev *dln2, u16 handle, int slot)
{
	struct dln2_mod_rx_slots *rxs;
	struct urb *urb = NULL;
	unsigned long flags;
	struct dln2_rx_context *rxc;

	rxs = &dln2->mod_rx_slots[handle];

	spin_lock_irqsave(&rxs->lock, flags);

	clear_bit(slot, rxs->bmap);

	rxc = &rxs->slots[slot];
	rxc->in_use = false;
	urb = rxc->urb;
	rxc->urb = NULL;
	/* rearm the completion for the slot's next user */
	reinit_completion(&rxc->done);

	spin_unlock_irqrestore(&rxs->lock, flags);

	if (urb) {
		int err;
		struct device *dev = &dln2->interface->dev;

		/* the URB was held back in dln2_rx; put it back in rotation */
		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err < 0)
			dev_err(dev, "failed to resubmit RX URB: %d\n", err);
	}

	wake_up_interruptible(&rxs->wq);
}
430 
/*
 * Send a request to the given module handle and wait for its response. On
 * success the response payload (everything after the dln2_response header)
 * is copied into ibuf, and *ibuf_len is clamped to the amount copied.
 *
 * Returns 0 on success, -ENODEV while disconnecting, -ETIMEDOUT when no
 * response arrives within DLN2_USB_TIMEOUT, -EPROTO on a malformed response
 * and -EREMOTEIO when the device reports an error result.
 */
static int _dln2_transfer(struct dln2_dev *dln2, u16 handle, u16 cmd,
			  const void *obuf, unsigned obuf_len,
			  void *ibuf, unsigned *ibuf_len)
{
	int ret = 0;
	int rx_slot;
	struct dln2_response *rsp;
	struct dln2_rx_context *rxc;
	struct device *dev = &dln2->interface->dev;
	const unsigned long timeout = msecs_to_jiffies(DLN2_USB_TIMEOUT);
	struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[handle];
	int size;

	/* account the transfer so dln2_stop can wait for it to drain */
	spin_lock(&dln2->disconnect_lock);
	if (!dln2->disconnect)
		dln2->active_transfers++;
	else
		ret = -ENODEV;
	spin_unlock(&dln2->disconnect_lock);

	if (ret)
		return ret;

	rx_slot = alloc_rx_slot(dln2, handle);
	if (rx_slot < 0) {
		ret = rx_slot;
		goto out_decr;
	}

	/* the slot index doubles as the echo value matched in dln2_rx */
	ret = dln2_send_wait(dln2, handle, cmd, rx_slot, obuf, obuf_len);
	if (ret < 0) {
		dev_err(dev, "USB write failed: %d\n", ret);
		goto out_free_rx_slot;
	}

	rxc = &rxs->slots[rx_slot];

	ret = wait_for_completion_interruptible_timeout(&rxc->done, timeout);
	if (ret <= 0) {
		if (!ret)
			ret = -ETIMEDOUT;
		goto out_free_rx_slot;
	} else {
		ret = 0;
	}

	/* dln2_stop completes waiters without a URB; bail out here */
	if (dln2->disconnect) {
		ret = -ENODEV;
		goto out_free_rx_slot;
	}

	/* if we got here we know that the response header has been checked */
	rsp = rxc->urb->transfer_buffer;
	size = le16_to_cpu(rsp->hdr.size);

	if (size < sizeof(*rsp)) {
		ret = -EPROTO;
		goto out_free_rx_slot;
	}

	if (le16_to_cpu(rsp->result) > 0x80) {
		dev_dbg(dev, "%d received response with error %d\n",
			handle, le16_to_cpu(rsp->result));
		ret = -EREMOTEIO;
		goto out_free_rx_slot;
	}

	if (!ibuf)
		goto out_free_rx_slot;

	/* never copy more than the response actually carries */
	if (*ibuf_len > size - sizeof(*rsp))
		*ibuf_len = size - sizeof(*rsp);

	memcpy(ibuf, rsp + 1, *ibuf_len);

out_free_rx_slot:
	free_rx_slot(dln2, handle, rx_slot);
out_decr:
	spin_lock(&dln2->disconnect_lock);
	dln2->active_transfers--;
	spin_unlock(&dln2->disconnect_lock);
	if (dln2->disconnect)
		wake_up(&dln2->disconnect_wq);

	return ret;
}
517 
/*
 * Public transfer entry point for the child MFD drivers: route the request
 * to the module handle recorded in the child's platform data.
 */
int dln2_transfer(struct platform_device *pdev, u16 cmd,
		  const void *obuf, unsigned obuf_len,
		  void *ibuf, unsigned *ibuf_len)
{
	struct dln2_dev *dln2 = dev_get_drvdata(pdev->dev.parent);
	struct dln2_platform_data *dln2_pdata = dev_get_platdata(&pdev->dev);

	return _dln2_transfer(dln2, dln2_pdata->handle, cmd, obuf, obuf_len,
			      ibuf, ibuf_len);
}
EXPORT_SYMBOL(dln2_transfer);
534 
dln2_check_hw(struct dln2_dev * dln2)535 static int dln2_check_hw(struct dln2_dev *dln2)
536 {
537 	int ret;
538 	__le32 hw_type;
539 	int len = sizeof(hw_type);
540 
541 	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_VER,
542 			     NULL, 0, &hw_type, &len);
543 	if (ret < 0)
544 		return ret;
545 	if (len < sizeof(hw_type))
546 		return -EREMOTEIO;
547 
548 	if (le32_to_cpu(hw_type) != DLN2_HW_ID) {
549 		dev_err(&dln2->interface->dev, "Device ID 0x%x not supported\n",
550 			le32_to_cpu(hw_type));
551 		return -ENODEV;
552 	}
553 
554 	return 0;
555 }
556 
dln2_print_serialno(struct dln2_dev * dln2)557 static int dln2_print_serialno(struct dln2_dev *dln2)
558 {
559 	int ret;
560 	__le32 serial_no;
561 	int len = sizeof(serial_no);
562 	struct device *dev = &dln2->interface->dev;
563 
564 	ret = _dln2_transfer(dln2, DLN2_HANDLE_CTRL, CMD_GET_DEVICE_SN, NULL, 0,
565 			     &serial_no, &len);
566 	if (ret < 0)
567 		return ret;
568 	if (len < sizeof(serial_no))
569 		return -EREMOTEIO;
570 
571 	dev_info(dev, "Diolan DLN2 serial %u\n", le32_to_cpu(serial_no));
572 
573 	return 0;
574 }
575 
/* Verify the hardware id, then report the serial number. */
static int dln2_hw_init(struct dln2_dev *dln2)
{
	int ret = dln2_check_hw(dln2);

	if (ret < 0)
		return ret;

	return dln2_print_serialno(dln2);
}
586 
dln2_free_rx_urbs(struct dln2_dev * dln2)587 static void dln2_free_rx_urbs(struct dln2_dev *dln2)
588 {
589 	int i;
590 
591 	for (i = 0; i < DLN2_MAX_URBS; i++) {
592 		usb_free_urb(dln2->rx_urb[i]);
593 		kfree(dln2->rx_buf[i]);
594 	}
595 }
596 
dln2_stop_rx_urbs(struct dln2_dev * dln2)597 static void dln2_stop_rx_urbs(struct dln2_dev *dln2)
598 {
599 	int i;
600 
601 	for (i = 0; i < DLN2_MAX_URBS; i++)
602 		usb_kill_urb(dln2->rx_urb[i]);
603 }
604 
/*
 * Final teardown: free the RX URBs/buffers, drop the usb_device reference
 * taken in probe and free the device structure itself.
 */
static void dln2_free(struct dln2_dev *dln2)
{
	dln2_free_rx_urbs(dln2);
	usb_put_dev(dln2->usb_dev);
	kfree(dln2);
}
611 
dln2_setup_rx_urbs(struct dln2_dev * dln2,struct usb_host_interface * hostif)612 static int dln2_setup_rx_urbs(struct dln2_dev *dln2,
613 			      struct usb_host_interface *hostif)
614 {
615 	int i;
616 	const int rx_max_size = DLN2_RX_BUF_SIZE;
617 
618 	for (i = 0; i < DLN2_MAX_URBS; i++) {
619 		dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL);
620 		if (!dln2->rx_buf[i])
621 			return -ENOMEM;
622 
623 		dln2->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
624 		if (!dln2->rx_urb[i])
625 			return -ENOMEM;
626 
627 		usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev,
628 				  usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in),
629 				  dln2->rx_buf[i], rx_max_size, dln2_rx, dln2);
630 	}
631 
632 	return 0;
633 }
634 
/* Submit all RX URBs; returns 0 or the first submission error. */
static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
{
	struct device *dev = &dln2->interface->dev;
	int idx;

	for (idx = 0; idx < DLN2_MAX_URBS; idx++) {
		int ret = usb_submit_urb(dln2->rx_urb[idx], gfp);

		if (ret < 0) {
			dev_err(dev, "failed to submit RX URB: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
651 
/* _ADR values used to match ACPI child nodes to the MFD cells below */
enum {
	DLN2_ACPI_MATCH_GPIO	= 0,
	DLN2_ACPI_MATCH_I2C	= 1,
	DLN2_ACPI_MATCH_SPI	= 2,
};
657 
/* Platform data and ACPI match info handed to each child MFD cell */
static struct dln2_platform_data dln2_pdata_gpio = {
	.handle = DLN2_HANDLE_GPIO,
};

static struct mfd_cell_acpi_match dln2_acpi_match_gpio = {
	.adr = DLN2_ACPI_MATCH_GPIO,
};

/* Only one I2C port seems to be supported on current hardware */
static struct dln2_platform_data dln2_pdata_i2c = {
	.handle = DLN2_HANDLE_I2C,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_i2c = {
	.adr = DLN2_ACPI_MATCH_I2C,
};

/* Only one SPI port supported */
static struct dln2_platform_data dln2_pdata_spi = {
	.handle = DLN2_HANDLE_SPI,
	.port = 0,
};

static struct mfd_cell_acpi_match dln2_acpi_match_spi = {
	.adr = DLN2_ACPI_MATCH_SPI,
};
685 
/* Child MFD cells: one sub-driver per DLN2 module (GPIO, I2C, SPI) */
static const struct mfd_cell dln2_devs[] = {
	{
		.name = "dln2-gpio",
		.acpi_match = &dln2_acpi_match_gpio,
		.platform_data = &dln2_pdata_gpio,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-i2c",
		.acpi_match = &dln2_acpi_match_i2c,
		.platform_data = &dln2_pdata_i2c,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
	{
		.name = "dln2-spi",
		.acpi_match = &dln2_acpi_match_spi,
		.platform_data = &dln2_pdata_spi,
		.pdata_size = sizeof(struct dln2_platform_data),
	},
};
706 
/*
 * Quiesce all device traffic: refuse new transfers, wake every transfer
 * waiting on a response, wait for active transfers to drain and finally
 * kill the RX URBs. Used from disconnect and suspend.
 */
static void dln2_stop(struct dln2_dev *dln2)
{
	int i, j;

	/* don't allow starting new transfers */
	spin_lock(&dln2->disconnect_lock);
	dln2->disconnect = true;
	spin_unlock(&dln2->disconnect_lock);

	/* cancel in progress transfers */
	for (i = 0; i < DLN2_HANDLES; i++) {
		struct dln2_mod_rx_slots *rxs = &dln2->mod_rx_slots[i];
		unsigned long flags;

		spin_lock_irqsave(&rxs->lock, flags);

		/* cancel all response waiters */
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++) {
			struct dln2_rx_context *rxc = &rxs->slots[j];

			if (rxc->in_use)
				complete(&rxc->done);
		}

		spin_unlock_irqrestore(&rxs->lock, flags);
	}

	/* wait for transfers to end */
	wait_event(dln2->disconnect_wq, !dln2->active_transfers);

	dln2_stop_rx_urbs(dln2);
}
739 
/* USB disconnect: stop traffic, remove the child MFD devices, free state. */
static void dln2_disconnect(struct usb_interface *interface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(interface);

	dln2_stop(dln2);

	mfd_remove_devices(&interface->dev);

	dln2_free(dln2);
}
750 
/*
 * USB probe: validate the expected bulk OUT/IN endpoint layout, allocate and
 * initialize the device state, start the RX URBs, verify the hardware and
 * register the child MFD cells. On failure the RX URBs are stopped and
 * everything allocated so far is released via dln2_free.
 */
static int dln2_probe(struct usb_interface *interface,
		      const struct usb_device_id *usb_id)
{
	struct usb_host_interface *hostif = interface->cur_altsetting;
	struct usb_endpoint_descriptor *epin;
	struct usb_endpoint_descriptor *epout;
	struct device *dev = &interface->dev;
	struct dln2_dev *dln2;
	int ret;
	int i, j;

	/* sanity-check the interface before dereferencing its endpoints */
	if (hostif->desc.bInterfaceNumber != 0 ||
	    hostif->desc.bNumEndpoints < 2)
		return -ENODEV;

	epout = &hostif->endpoint[DLN2_EP_OUT].desc;
	if (!usb_endpoint_is_bulk_out(epout))
		return -ENODEV;
	epin = &hostif->endpoint[DLN2_EP_IN].desc;
	if (!usb_endpoint_is_bulk_in(epin))
		return -ENODEV;

	dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
	if (!dln2)
		return -ENOMEM;

	dln2->ep_out = epout->bEndpointAddress;
	dln2->ep_in = epin->bEndpointAddress;
	dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
	dln2->interface = interface;
	usb_set_intfdata(interface, dln2);
	init_waitqueue_head(&dln2->disconnect_wq);

	/* initialize every RX slot before any URB can complete */
	for (i = 0; i < DLN2_HANDLES; i++) {
		init_waitqueue_head(&dln2->mod_rx_slots[i].wq);
		spin_lock_init(&dln2->mod_rx_slots[i].lock);
		for (j = 0; j < DLN2_MAX_RX_SLOTS; j++)
			init_completion(&dln2->mod_rx_slots[i].slots[j].done);
	}

	spin_lock_init(&dln2->event_cb_lock);
	spin_lock_init(&dln2->disconnect_lock);
	INIT_LIST_HEAD(&dln2->event_cb_list);

	ret = dln2_setup_rx_urbs(dln2, hostif);
	if (ret)
		goto out_free;

	ret = dln2_start_rx_urbs(dln2, GFP_KERNEL);
	if (ret)
		goto out_stop_rx;

	ret = dln2_hw_init(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to initialize hardware\n");
		goto out_stop_rx;
	}

	ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs));
	if (ret != 0) {
		dev_err(dev, "failed to add mfd devices to core\n");
		goto out_stop_rx;
	}

	return 0;

out_stop_rx:
	dln2_stop_rx_urbs(dln2);

out_free:
	dln2_free(dln2);

	return ret;
}
825 
/* Suspend: quiesce all traffic (sets the disconnect flag, see dln2_stop). */
static int dln2_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2_stop(dln2);

	return 0;
}
834 
/* Resume: clear the flag set by dln2_stop and restart the RX URBs. */
static int dln2_resume(struct usb_interface *iface)
{
	struct dln2_dev *dln2 = usb_get_intfdata(iface);

	dln2->disconnect = false;

	return dln2_start_rx_urbs(dln2, GFP_NOIO);
}
843 
/* Diolan DLN2 USB vendor/product id */
static const struct usb_device_id dln2_table[] = {
	{ USB_DEVICE(0xa257, 0x2013) },
	{ }
};

MODULE_DEVICE_TABLE(usb, dln2_table);
850 
/* USB driver glue */
static struct usb_driver dln2_driver = {
	.name = "dln2",
	.probe = dln2_probe,
	.disconnect = dln2_disconnect,
	.id_table = dln2_table,
	.suspend = dln2_suspend,
	.resume = dln2_resume,
};

module_usb_driver(dln2_driver);
861 
MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>");
MODULE_DESCRIPTION("Core driver for the Diolan DLN2 interface adapter");
MODULE_LICENSE("GPL v2");
865