/*
 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>

/* Offset of event and mask registers from status register. */
#define MLXREG_HOTPLUG_EVENT_OFF	1
#define MLXREG_HOTPLUG_MASK_OFF		2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF	1

/* ASIC good health mask. */
#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK	0x02

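/*
 * Maximum number of hotplug sysfs attributes and the number of handler
 * invocations with no asserted signals after which all signals are rescanned
 * to recover possibly missed events.
 */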
#define MLXREG_HOTPLUG_ATTRS_MAX	24
#define MLXREG_HOTPLUG_NOT_ASSERT	3

/**
 * struct mlxreg_hotplug_priv_data - platform private data:
 * @irq: platform device interrupt number;
 * @dev: basic device;
 * @pdev: platform device;
 * @plat: platform data;
 * @regmap: register map handle;
 * @dwork_irq: delayed work template;
 * @lock: spin lock;
 * @hwmon: hwmon device;
 * @mlxreg_hotplug_attr: sysfs attributes array;
 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
 * @group: sysfs attribute group;
 * @groups: list of sysfs attribute groups for hwmon registration;
 * @cell: location of top aggregation interrupt register;
 * @mask: top aggregation interrupt common mask;
 * @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indicating probing completion;
 * @not_asserted: number of entries in workqueue with no signal assertion;
 */
struct mlxreg_hotplug_priv_data {
	int irq;
	struct device *dev;
	struct platform_device *pdev;
	struct mlxreg_hotplug_platform_data *plat;
	struct regmap *regmap;
	struct delayed_work dwork_irq;
	spinlock_t lock; /* sync with interrupt */
	struct device *hwmon;
	struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
	struct sensor_device_attribute_2
			mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
	struct attribute_group group;
	const struct attribute_group *groups[2];
	u32 cell;
	u32 mask;
	u32 aggr_cache;
	bool after_probe;
	u8 not_asserted;
};

static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
					struct mlxreg_core_data *data)
{
	struct mlxreg_core_hotplug_platform_data *pdata;

	/* Notify user by sending hwmon uevent. */
	kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);

	/*
	 * Return if adapter number is negative. It could happen in case the
	 * hotplug event is not associated with a hotplug device.
	 */
	if (data->hpdev.nr < 0)
		return 0;

	pdata = dev_get_platdata(&priv->pdev->dev);
	data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
					      pdata->shift_nr);
	if (!data->hpdev.adapter) {
		dev_err(priv->dev, "Failed to get adapter for bus %d\n",
			data->hpdev.nr + pdata->shift_nr);
		return -EFAULT;
	}

	data->hpdev.client = i2c_new_device(data->hpdev.adapter,
					    data->hpdev.brdinfo);
	if (!data->hpdev.client) {
		dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
			data->hpdev.brdinfo->type, data->hpdev.nr +
			pdata->shift_nr, data->hpdev.brdinfo->addr);

		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
		return -EFAULT;
	}

	return 0;
}

static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
			      struct mlxreg_core_data *data)
{
	/* Notify user by sending hwmon uevent. */
	kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);

	if (data->hpdev.client) {
		i2c_unregister_device(data->hpdev.client);
		data->hpdev.client = NULL;
	}

	if (data->hpdev.adapter) {
		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
	}
}

static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
	struct mlxreg_core_hotplug_platform_data *pdata;
	int index = to_sensor_dev_attr_2(attr)->index;
	int nr = to_sensor_dev_attr_2(attr)->nr;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items + nr;
	data = item->data + index;

	ret = regmap_read(priv->regmap, data->reg, &regval);
	if (ret)
		return ret;

	if (item->health) {
		regval &= data->mask;
	} else {
		/* Bit = 0 : functional if item->inversed is true. */
		if (item->inversed)
			regval = !(regval & data->mask);
		else
			regval = !!(regval & data->mask);
	}

	return sprintf(buf, "%u\n", regval);
}

#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]

static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int num_attrs = 0, id = 0, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Go over all kinds of items - psu, pwr, fan. */
	for (i = 0; i < pdata->counter; i++, item++) {
		num_attrs += item->count;
		data = item->data;
		/* Go over all units within the item. */
		for (j = 0; j < item->count; j++, data++, id++) {
			PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
			PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
							     GFP_KERNEL, "%s",
							     data->label);

			if (!PRIV_ATTR(id)->name) {
				dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
					id);
				return -ENOMEM;
			}

			PRIV_DEV_ATTR(id).dev_attr.attr.name =
							PRIV_ATTR(id)->name;
			PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
			PRIV_DEV_ATTR(id).dev_attr.show =
						mlxreg_hotplug_attr_show;
			PRIV_DEV_ATTR(id).nr = i;
			PRIV_DEV_ATTR(id).index = j;
			sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
		}
	}

	priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
					 num_attrs,
					 sizeof(struct attribute *),
					 GFP_KERNEL);
	if (!priv->group.attrs)
		return -ENOMEM;

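	/* Expose the attribute array through a single sysfs group. */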
	priv->group.attrs = priv->mlxreg_hotplug_attr;
	priv->groups[0] = &priv->group;
	priv->groups[1] = NULL;

	return 0;
}

static void
mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
			   struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data;
	unsigned long asserted;
	u32 regval, bit;
	int ret;

	/*
	 * Validate if item related to received signal type is valid.
	 * It should never happen, except in the situation when some piece of
	 * hardware is broken. In such situation just produce an error message
	 * and return. Caller must continue to handle the signals from other
	 * devices if any.
	 */
	if (unlikely(!item)) {
		dev_err(priv->dev, "False signal is received.\n");
		return;
	}

	/* Mask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   0);
	if (ret)
		goto out;

	/* Read status. */
	ret = regmap_read(priv->regmap, item->reg, &regval);
	if (ret)
		goto out;

	/* Set asserted bits and save last status. */
	regval &= item->mask;
	asserted = item->cache ^ regval;
	item->cache = regval;

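	/*
	 * Go over the toggled bits: a set status bit indicates device
	 * presence (or absence, when the item is inversed), so the associated
	 * device is created or destroyed accordingly.
	 */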
	for_each_set_bit(bit, &asserted, 8) {
		data = item->data + bit;
		if (regval & BIT(bit)) {
			if (item->inversed)
				mlxreg_hotplug_device_destroy(priv, data);
			else
				mlxreg_hotplug_device_create(priv, data);
		} else {
			if (item->inversed)
				mlxreg_hotplug_device_create(priv, data);
			else
				mlxreg_hotplug_device_destroy(priv, data);
		}
	}

	/* Acknowledge event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
			   0);
	if (ret)
		goto out;

	/* Unmask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   item->mask);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
				  struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data = item->data;
	u32 regval;
	int i, ret = 0;

	for (i = 0; i < item->count; i++, data++) {
		/* Mask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, 0);
		if (ret)
			goto out;

		/* Read status. */
		ret = regmap_read(priv->regmap, data->reg, &regval);
		if (ret)
			goto out;

		regval &= data->mask;

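		/*
		 * Nothing to do if health status has not changed; just re-arm
		 * the event.
		 */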
		if (item->cache == regval)
			goto ack_event;

		/*
		 * ASIC health indication is provided through two bits. Bits
		 * value 0x2 indicates that the ASIC reached the good health,
		 * value 0x0 indicates the bad ASIC health or dormant state
		 * and value 0x3 indicates the booting state. During ASIC
		 * reset it should pass the following states:
		 * dormant -> booting -> good.
		 */
		if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
			if (!data->attached) {
				/*
				 * ASIC is in steady state. Connect associated
				 * device, if configured.
				 */
				mlxreg_hotplug_device_create(priv, data);
				data->attached = true;
			}
		} else {
			if (data->attached) {
				/*
				 * ASIC health has failed after the ASIC has
				 * been in steady state. Disconnect associated
				 * device, if it has been connected.
				 */
				mlxreg_hotplug_device_destroy(priv, data);
				data->attached = false;
				data->health_cntr = 0;
			}
		}
		item->cache = regval;
ack_event:
		/* Acknowledge event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Unmask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, data->mask);
		if (ret)
			goto out;
	}

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}


/*
 * mlxreg_hotplug_work_handler - performs traversing of device interrupt
 * registers according to the below hierarchy schema:
 *
 *				Aggregation registers (status/mask)
 * PSU registers:		*---*
 * *-----------------*		|   |
 * |status/event/mask|----->	| * |
 * *-----------------*		|   |
 * Power registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->	| * |
 * *-----------------*		|   |
 * FAN registers:		|   |--> CPU
 * *-----------------*		|   |
 * |status/event/mask|----->	| * |
 * *-----------------*		|   |
 * ASIC registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->	| * |
 * *-----------------*		|   |
 *				*---*
 *
 * In case some system changes are detected: FAN in/out, PSU in/out, power
 * cable attached/detached, ASIC health good/bad, relevant device is created
 * or destroyed.
 */
static void mlxreg_hotplug_work_handler(struct work_struct *work)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct mlxreg_core_item *item;
	u32 regval, aggr_asserted;
	unsigned long flags;
	int i, ret;

	priv = container_of(work, struct mlxreg_hotplug_priv_data,
			    dwork_irq.work);
	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Mask aggregation event. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
	if (ret < 0)
		goto out;

	/* Read aggregation status. */
	ret = regmap_read(priv->regmap, pdata->cell, &regval);
	if (ret)
		goto out;

	regval &= pdata->mask;
	aggr_asserted = priv->aggr_cache ^ regval;
	priv->aggr_cache = regval;

	/*
	 * Handler is invoked, but no assertion is detected at top aggregation
	 * status level. Set aggr_asserted to the mask value to allow the
	 * handler an extra run over all relevant signals to recover any
	 * missed signal.
	 */
	if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
		priv->not_asserted = 0;
		aggr_asserted = pdata->mask;
	}
	if (!aggr_asserted)
		goto unmask_event;

	/* Handle topology and health configuration changes. */
	for (i = 0; i < pdata->counter; i++, item++) {
		if (aggr_asserted & item->aggr_mask) {
			if (item->health)
				mlxreg_hotplug_health_work_helper(priv, item);
			else
				mlxreg_hotplug_work_helper(priv, item);
		}
	}

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * It is possible that some signals have been asserted while the
	 * interrupt has been masked by mlxreg_hotplug_work_handler. In this
	 * case such signals will be missed. In order to handle these signals
	 * the delayed work is canceled and the work task is re-scheduled for
	 * immediate execution. This allows handling the missed signals, if
	 * any. Otherwise the work handler just validates that no new signals
	 * have been received during masking.
	 */
	cancel_delayed_work(&priv->dwork_irq);
	schedule_delayed_work(&priv->dwork_irq, 0);

	spin_unlock_irqrestore(&priv->lock, flags);

	return;

unmask_event:
	priv->not_asserted++;
	/* Unmask aggregation event (no need to acknowledge). */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int i, j, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	for (i = 0; i < pdata->counter; i++, item++) {
		/* Clear group presence event. */
		ret = regmap_write(priv->regmap, item->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/*
		 * Verify if hardware configuration requires disabling the
		 * interrupt capability for some of the components.
		 */
		data = item->data;
		for (j = 0; j < item->count; j++, data++) {
			/* Verify if the attribute has a capability register. */
			if (data->capability) {
				/* Read capability register. */
				ret = regmap_read(priv->regmap,
						  data->capability, &regval);
				if (ret)
					goto out;

				if (!(regval & data->bit))
					item->mask &= ~BIT(j);
			}
		}

		/* Set group initial status as mask and unmask group event. */
		if (item->inversed) {
			item->cache = item->mask;
			ret = regmap_write(priv->regmap, item->reg +
					   MLXREG_HOTPLUG_MASK_OFF,
					   item->mask);
			if (ret)
				goto out;
		}
	}

	/* Keep aggregation initial status as zero and unmask events. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
	if (ret)
		goto out;

	/* Keep low aggregation initial status as zero and unmask events. */
	if (pdata->cell_low) {
		ret = regmap_write(priv->regmap, pdata->cell_low +
				   MLXREG_HOTPLUG_AGGR_MASK_OFF,
				   pdata->mask_low);
		if (ret)
			goto out;
	}

	/* Invoke the work handler to initialize the hotplug devices setting. */
	mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to set interrupts.\n");
	enable_irq(priv->irq);
	return ret;
}

static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int count, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;
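	/* Stop the interrupt and any scheduled work before masking events. */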
	disable_irq(priv->irq);
	cancel_delayed_work_sync(&priv->dwork_irq);

	/* Mask low aggregation event, if defined. */
	if (pdata->cell_low)
		regmap_write(priv->regmap, pdata->cell_low +
			     MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

	/* Mask aggregation event. */
	regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
		     0);

	/* Clear topology configurations. */
	for (i = 0; i < pdata->counter; i++, item++) {
		data = item->data;
		/* Mask group presence event. */
		regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
			     0);
		/* Clear group presence event. */
		regmap_write(priv->regmap, data->reg +
			     MLXREG_HOTPLUG_EVENT_OFF, 0);

		/* Remove all the attached devices in group. */
		count = item->count;
		for (j = 0; j < count; j++, data++)
			mlxreg_hotplug_device_destroy(priv, data);
	}
}

static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
{
	struct mlxreg_hotplug_priv_data *priv;

	priv = (struct mlxreg_hotplug_priv_data *)dev;

	/* Schedule work task for immediate execution. */
	schedule_delayed_work(&priv->dwork_irq, 0);

	return IRQ_HANDLED;
}

static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct i2c_adapter *deferred_adap;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to get platform data.\n");
		return -EINVAL;
	}

	/* Defer probing if the necessary adapter is not configured yet. */
	deferred_adap = i2c_get_adapter(pdata->deferred_nr);
	if (!deferred_adap)
		return -EPROBE_DEFER;
	i2c_put_adapter(deferred_adap);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (pdata->irq) {
		priv->irq = pdata->irq;
	} else {
		priv->irq = platform_get_irq(pdev, 0);
		if (priv->irq < 0)
			return priv->irq;
	}

	priv->regmap = pdata->regmap;
	priv->dev = pdev->dev.parent;
	priv->pdev = pdev;

	err = devm_request_irq(&pdev->dev, priv->irq,
			       mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
			       | IRQF_SHARED, "mlxreg-hotplug", priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
		return err;
	}

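	/*
	 * Keep the interrupt line disabled until the initial setup is
	 * completed by mlxreg_hotplug_set_irq().
	 */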
	disable_irq(priv->irq);
	spin_lock_init(&priv->lock);
	INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
	dev_set_drvdata(&pdev->dev, priv);

	err = mlxreg_hotplug_attr_init(priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
			err);
		return err;
	}

	priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
					"mlxreg_hotplug", priv, priv->groups);
	if (IS_ERR(priv->hwmon)) {
		dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
			PTR_ERR(priv->hwmon));
		return PTR_ERR(priv->hwmon);
	}

	/* Perform initial interrupts setup. */
	mlxreg_hotplug_set_irq(priv);
	priv->after_probe = true;

	return 0;
}

static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

	/* Clean interrupts setup. */
	mlxreg_hotplug_unset_irq(priv);
	devm_free_irq(&pdev->dev, priv->irq, priv);

	return 0;
}

static struct platform_driver mlxreg_hotplug_driver = {
	.driver = {
		.name = "mlxreg-hotplug",
	},
	.probe = mlxreg_hotplug_probe,
	.remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");