// SPDX-License-Identifier: GPL-2.0
/*
 * GPIO Greybus driver.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/gpio/driver.h>
#include <linux/mutex.h>
#include <linux/greybus.h>

#include "gbphy.h"

struct gb_gpio_line {
	/* The following has to be an array of line_max entries */
	/* --> make them just a flags field */
	u8			active:    1,
				direction: 1,	/* 0 = output, 1 = input */
				value:     1;	/* 0 = low, 1 = high */
	u16			debounce_usec;

	u8			irq_type;
	bool			irq_type_pending;
	bool			masked;
	bool			masked_pending;
};

struct gb_gpio_controller {
	struct gbphy_device	*gbphy_dev;
	struct gb_connection	*connection;
	u8			line_max;	/* max line number */
	struct gb_gpio_line	*lines;

	struct gpio_chip	chip;
	struct irq_chip		irqc;
	struct mutex		irq_lock;
};
#define gpio_chip_to_gb_gpio_controller(chip) \
	container_of(chip, struct gb_gpio_controller, chip)
#define irq_data_to_gpio_chip(d) (d->domain->host_data)

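/*
 * Ask the module how many GPIO lines it has.  The value returned in the
 * response is stored as line_max, the highest valid line number, so the
 * gpio_chip later exposes line_max + 1 lines.
 */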
static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
{
	struct gb_gpio_line_count_response response;
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
				NULL, 0, &response, sizeof(response));
	if (!ret)
		ggc->line_max = response.count;
	return ret;
}

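/*
 * Activate a line on the module.  Each active line holds a runtime PM
 * reference on the gbphy device, which is dropped again when the line is
 * deactivated (or here on failure).
 */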
static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
{
	struct gb_gpio_activate_request request;
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		gbphy_runtime_put_autosuspend(gbphy_dev);
		return ret;
	}

	ggc->lines[which].active = true;

	return 0;
}

static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
					 u8 which)
{
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	struct device *dev = &gbphy_dev->dev;
	struct gb_gpio_deactivate_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to deactivate gpio %u\n", which);
		goto out_pm_put;
	}

	ggc->lines[which].active = false;

out_pm_put:
	gbphy_runtime_put_autosuspend(gbphy_dev);
}

static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
					   u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_direction_request request;
	struct gb_gpio_get_direction_response response;
	int ret;
	u8 direction;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret)
		return ret;

	direction = response.direction;
	if (direction && direction != 1) {
		dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
			 which, direction);
	}
	ggc->lines[which].direction = direction ? 1 : 0;
	return 0;
}

static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
					  u8 which)
{
	struct gb_gpio_direction_in_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 1;
	return ret;
}

static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
					   u8 which, bool value_high)
{
	struct gb_gpio_direction_out_request request;
	int ret;

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 0;
	return ret;
}

static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
				       u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_value_request request;
	struct gb_gpio_get_value_response response;
	int ret;
	u8 value;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(dev, "failed to get value of gpio %u\n", which);
		return ret;
	}

	value = response.value;
	if (value && value != 1) {
		dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
			 which, value);
	}
	ggc->lines[which].value = value ? 1 : 0;
	return 0;
}

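/*
 * Set the output value of a line.  Lines currently configured as input
 * are refused, and the last value successfully written is cached in the
 * per-line state.
 */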
static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
					u8 which, bool value_high)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_set_value_request request;
	int ret;

	if (ggc->lines[which].direction == 1) {
		dev_warn(dev, "refusing to set value of input gpio %u\n",
			 which);
		return;
	}

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to set value of gpio %u\n", which);
		return;
	}

	ggc->lines[which].value = request.value;
}

static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
					  u8 which, u16 debounce_usec)
{
	struct gb_gpio_set_debounce_request request;
	int ret;

	request.which = which;
	request.usec = cpu_to_le16(debounce_usec);
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].debounce_usec = debounce_usec;
	return ret;
}

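/*
 * The _gb_gpio_irq_* helpers below issue the actual (sleeping) Greybus
 * operations.  They are not called from the irq_chip callbacks directly,
 * but from gb_gpio_irq_bus_sync_unlock() where sleeping is allowed.
 */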
static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_mask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_MASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to mask irq: %d\n", ret);
}

static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_unmask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_UNMASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to unmask irq: %d\n", ret);
}

static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
				  u8 hwirq, u8 type)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_type_request request;
	int ret;

	request.which = hwirq;
	request.type = type;

	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_TYPE,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to set irq type: %d\n", ret);
}

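/*
 * The irq_chip callbacks below may be invoked in atomic context, so they
 * must not issue Greybus traffic themselves.  They only record the
 * requested mask state and trigger type in the per-line state; the
 * pending changes are pushed to the module in
 * gb_gpio_irq_bus_sync_unlock().
 */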
static void gb_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = true;
	line->masked_pending = true;
}

static void gb_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = false;
	line->masked_pending = true;
}

static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
	struct device *dev = &ggc->gbphy_dev->dev;
	u8 irq_type;

	switch (type) {
	case IRQ_TYPE_NONE:
		irq_type = GB_GPIO_IRQ_TYPE_NONE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		dev_err(dev, "unsupported irq type: %u\n", type);
		return -EINVAL;
	}

	line->irq_type = irq_type;
	line->irq_type_pending = true;

	return 0;
}

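/*
 * irq_bus_lock() and irq_bus_sync_unlock() bracket a sequence of irq_chip
 * callbacks.  Any mask or trigger-type change recorded while the lock was
 * held is sent to the module in the unlock path, where it is safe to
 * sleep.
 */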
static void gb_gpio_irq_bus_lock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	mutex_lock(&ggc->irq_lock);
}

static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	if (line->irq_type_pending) {
		_gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
		line->irq_type_pending = false;
	}

	if (line->masked_pending) {
		if (line->masked)
			_gb_gpio_irq_mask(ggc, d->hwirq);
		else
			_gb_gpio_irq_unmask(ggc, d->hwirq);
		line->masked_pending = false;
	}

	mutex_unlock(&ggc->irq_lock);
}

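/*
 * Handle an unsolicited IRQ event request sent by the module.  The
 * payload identifies the hardware line, which is validated, mapped to its
 * Linux interrupt number and dispatched via generic_handle_irq().  Local
 * interrupts are disabled around the call since this handler runs in
 * process context.
 */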
static int gb_gpio_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_message *request;
	struct gb_gpio_irq_event_request *event;
	u8 type = op->type;
	int irq, ret;

	if (type != GB_GPIO_TYPE_IRQ_EVENT) {
		dev_err(dev, "unsupported unsolicited request: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*event)) {
		dev_err(dev, "short event received (%zu < %zu)\n",
			request->payload_size, sizeof(*event));
		return -EINVAL;
	}

	event = request->payload;
	if (event->which > ggc->line_max) {
		dev_err(dev, "invalid hw irq: %d\n", event->which);
		return -EINVAL;
	}

	irq = irq_find_mapping(ggc->chip.irq.domain, event->which);
	if (!irq) {
		dev_err(dev, "failed to find IRQ\n");
		return -EINVAL;
	}

	local_irq_disable();
	ret = generic_handle_irq(irq);
	local_irq_enable();

	if (ret)
		dev_err(dev, "failed to invoke irq handler\n");

	return ret;
}

static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_activate_operation(ggc, (u8)offset);
}

static void gb_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_deactivate_operation(ggc, (u8)offset);
}

static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_direction_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].direction ? 1 : 0;
}

static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_in_operation(ggc, (u8)offset);
}

static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
				    int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
}

static int gb_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_value_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].value;
}

static void gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}

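/*
 * Only debounce configuration (PIN_CONFIG_INPUT_DEBOUNCE) is supported;
 * the protocol carries the debounce period as a 16-bit microsecond value.
 */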
static int gb_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
			      unsigned long config)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u32 debounce;

	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
		return -ENOTSUPP;

	debounce = pinconf_to_config_argument(config);
	if (debounce > U16_MAX)
		return -EINVAL;

	return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)debounce);
}

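/* Query the line count and allocate per-line state (line_max + 1 entries). */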
static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
{
	int ret;

	/* Now find out how many lines there are */
	ret = gb_gpio_line_count_operation(ggc);
	if (ret)
		return ret;

	ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
			     GFP_KERNEL);
	if (!ggc->lines)
		return -ENOMEM;

	return ret;
}

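/*
 * Probe: create the connection and enable it for TX only so the line
 * count can be queried, set up the gpio_chip and its irq_chip, then fully
 * enable the connection (allowing incoming IRQ events) before registering
 * the chip.
 */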
static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_gpio_controller *ggc;
	struct gpio_chip *gpio;
	struct gpio_irq_chip *girq;
	struct irq_chip *irqc;
	int ret;

	ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
	if (!ggc)
		return -ENOMEM;

	connection =
		gb_connection_create(gbphy_dev->bundle,
				     le16_to_cpu(gbphy_dev->cport_desc->id),
				     gb_gpio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_ggc_free;
	}

	ggc->connection = connection;
	gb_connection_set_data(connection, ggc);
	ggc->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, ggc);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_gpio_controller_setup(ggc);
	if (ret)
		goto exit_connection_disable;

	irqc = &ggc->irqc;
	irqc->irq_mask = gb_gpio_irq_mask;
	irqc->irq_unmask = gb_gpio_irq_unmask;
	irqc->irq_set_type = gb_gpio_irq_set_type;
	irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
	irqc->name = "greybus_gpio";

	mutex_init(&ggc->irq_lock);

	gpio = &ggc->chip;

	gpio->label = "greybus_gpio";
	gpio->parent = &gbphy_dev->dev;
	gpio->owner = THIS_MODULE;

	gpio->request = gb_gpio_request;
	gpio->free = gb_gpio_free;
	gpio->get_direction = gb_gpio_get_direction;
	gpio->direction_input = gb_gpio_direction_input;
	gpio->direction_output = gb_gpio_direction_output;
	gpio->get = gb_gpio_get;
	gpio->set = gb_gpio_set;
	gpio->set_config = gb_gpio_set_config;
	gpio->base = -1;		/* Allocate base dynamically */
	gpio->ngpio = ggc->line_max + 1;
	gpio->can_sleep = true;

	girq = &gpio->irq;
	girq->chip = irqc;
	/* The event comes from the outside so no parent handler */
	girq->parent_handler = NULL;
	girq->num_parents = 0;
	girq->parents = NULL;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_level_irq;

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_line_free;

	ret = gpiochip_add(gpio);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
		goto exit_line_free;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

exit_line_free:
	kfree(ggc->lines);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_ggc_free:
	kfree(ggc);
	return ret;
}

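/*
 * Remove: stop incoming requests first (disable RX) so no further IRQ
 * events arrive while the gpiochip is removed, then tear down the
 * connection and free the per-line state.
 */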
static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = ggc->connection;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	gb_connection_disable_rx(connection);
	gpiochip_remove(&ggc->chip);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(ggc->lines);
	kfree(ggc);
}

static const struct gbphy_device_id gb_gpio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);

static struct gbphy_driver gpio_driver = {
	.name		= "gpio",
	.probe		= gb_gpio_probe,
	.remove		= gb_gpio_remove,
	.id_table	= gb_gpio_id_table,
};

module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");