1 /*
2 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the names of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * Alternatively, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2 as published by the Free
19 * Software Foundation.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <linux/bitops.h>
35 #include <linux/device.h>
36 #include <linux/hwmon.h>
37 #include <linux/hwmon-sysfs.h>
38 #include <linux/i2c.h>
39 #include <linux/interrupt.h>
40 #include <linux/module.h>
41 #include <linux/of_device.h>
42 #include <linux/platform_data/mlxreg.h>
43 #include <linux/platform_device.h>
44 #include <linux/spinlock.h>
45 #include <linux/regmap.h>
46 #include <linux/workqueue.h>
47
48 /* Offset of event and mask registers from status register. */
49 #define MLXREG_HOTPLUG_EVENT_OFF 1
50 #define MLXREG_HOTPLUG_MASK_OFF 2
51 #define MLXREG_HOTPLUG_AGGR_MASK_OFF 1
52
53 /* ASIC good health mask. */
54 #define MLXREG_HOTPLUG_GOOD_HEALTH_MASK 0x02
55
56 #define MLXREG_HOTPLUG_ATTRS_MAX 24
57 #define MLXREG_HOTPLUG_NOT_ASSERT 3
58
59 /**
60 * struct mlxreg_hotplug_priv_data - platform private data:
61 * @irq: platform device interrupt number;
62 * @dev: basic device;
63 * @pdev: platform device;
64 * @plat: platform data;
65 * @regmap: register map handle;
66 * @dwork_irq: delayed work template;
67 * @lock: spin lock;
68 * @hwmon: hwmon device;
69 * @mlxreg_hotplug_attr: sysfs attributes array;
70 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
71 * @group: sysfs attribute group;
72 * @groups: list of sysfs attribute group for hwmon registration;
73 * @cell: location of top aggregation interrupt register;
74 * @mask: top aggregation interrupt common mask;
75 * @aggr_cache: last value of aggregation register status;
76 * @after_probe: flag indication probing completion;
77 * @not_asserted: number of entries in workqueue with no signal assertion;
78 */
79 struct mlxreg_hotplug_priv_data {
80 int irq;
81 struct device *dev;
82 struct platform_device *pdev;
83 struct mlxreg_hotplug_platform_data *plat;
84 struct regmap *regmap;
85 struct delayed_work dwork_irq;
86 spinlock_t lock; /* sync with interrupt */
87 struct device *hwmon;
88 struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
89 struct sensor_device_attribute_2
90 mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
91 struct attribute_group group;
92 const struct attribute_group *groups[2];
93 u32 cell;
94 u32 mask;
95 u32 aggr_cache;
96 bool after_probe;
97 u8 not_asserted;
98 };
99
mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data * priv,struct mlxreg_core_data * data)100 static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
101 struct mlxreg_core_data *data)
102 {
103 struct mlxreg_core_hotplug_platform_data *pdata;
104
105 /* Notify user by sending hwmon uevent. */
106 kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);
107
108 /*
109 * Return if adapter number is negative. It could be in case hotplug
110 * event is not associated with hotplug device.
111 */
112 if (data->hpdev.nr < 0)
113 return 0;
114
115 pdata = dev_get_platdata(&priv->pdev->dev);
116 data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
117 pdata->shift_nr);
118 if (!data->hpdev.adapter) {
119 dev_err(priv->dev, "Failed to get adapter for bus %d\n",
120 data->hpdev.nr + pdata->shift_nr);
121 return -EFAULT;
122 }
123
124 data->hpdev.client = i2c_new_device(data->hpdev.adapter,
125 data->hpdev.brdinfo);
126 if (!data->hpdev.client) {
127 dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
128 data->hpdev.brdinfo->type, data->hpdev.nr +
129 pdata->shift_nr, data->hpdev.brdinfo->addr);
130
131 i2c_put_adapter(data->hpdev.adapter);
132 data->hpdev.adapter = NULL;
133 return -EFAULT;
134 }
135
136 return 0;
137 }
138
/*
 * mlxreg_hotplug_device_destroy - detach the I2C device associated with a
 * hotplug event and notify userspace via a hwmon uevent.
 *
 * Unregisters the client before releasing the adapter reference (reverse
 * order of mlxreg_hotplug_device_create); both pointers are cleared so the
 * function is safe to call again for the same entry.
 */
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
			      struct mlxreg_core_data *data)
{
	/* Notify user by sending hwmon uevent. */
	kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);

	if (data->hpdev.client) {
		i2c_unregister_device(data->hpdev.client);
		data->hpdev.client = NULL;
	}

	if (data->hpdev.adapter) {
		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
	}
}
156
mlxreg_hotplug_attr_show(struct device * dev,struct device_attribute * attr,char * buf)157 static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
158 struct device_attribute *attr,
159 char *buf)
160 {
161 struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
162 struct mlxreg_core_hotplug_platform_data *pdata;
163 int index = to_sensor_dev_attr_2(attr)->index;
164 int nr = to_sensor_dev_attr_2(attr)->nr;
165 struct mlxreg_core_item *item;
166 struct mlxreg_core_data *data;
167 u32 regval;
168 int ret;
169
170 pdata = dev_get_platdata(&priv->pdev->dev);
171 item = pdata->items + nr;
172 data = item->data + index;
173
174 ret = regmap_read(priv->regmap, data->reg, ®val);
175 if (ret)
176 return ret;
177
178 if (item->health) {
179 regval &= data->mask;
180 } else {
181 /* Bit = 0 : functional if item->inversed is true. */
182 if (item->inversed)
183 regval = !(regval & data->mask);
184 else
185 regval = !!(regval & data->mask);
186 }
187
188 return sprintf(buf, "%u\n", regval);
189 }
190
191 #define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
192 #define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]
193
mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data * priv)194 static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
195 {
196 struct mlxreg_core_hotplug_platform_data *pdata;
197 struct mlxreg_core_item *item;
198 struct mlxreg_core_data *data;
199 int num_attrs = 0, id = 0, i, j;
200
201 pdata = dev_get_platdata(&priv->pdev->dev);
202 item = pdata->items;
203
204 /* Go over all kinds of items - psu, pwr, fan. */
205 for (i = 0; i < pdata->counter; i++, item++) {
206 num_attrs += item->count;
207 data = item->data;
208 /* Go over all units within the item. */
209 for (j = 0; j < item->count; j++, data++, id++) {
210 PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
211 PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
212 GFP_KERNEL,
213 data->label);
214
215 if (!PRIV_ATTR(id)->name) {
216 dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
217 id);
218 return -ENOMEM;
219 }
220
221 PRIV_DEV_ATTR(id).dev_attr.attr.name =
222 PRIV_ATTR(id)->name;
223 PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
224 PRIV_DEV_ATTR(id).dev_attr.show =
225 mlxreg_hotplug_attr_show;
226 PRIV_DEV_ATTR(id).nr = i;
227 PRIV_DEV_ATTR(id).index = j;
228 sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
229 }
230 }
231
232 priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
233 num_attrs,
234 sizeof(struct attribute *),
235 GFP_KERNEL);
236 if (!priv->group.attrs)
237 return -ENOMEM;
238
239 priv->group.attrs = priv->mlxreg_hotplug_attr;
240 priv->groups[0] = &priv->group;
241 priv->groups[1] = NULL;
242
243 return 0;
244 }
245
246 static void
mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data * priv,struct mlxreg_core_item * item)247 mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
248 struct mlxreg_core_item *item)
249 {
250 struct mlxreg_core_data *data;
251 u32 asserted, regval, bit;
252 int ret;
253
254 /*
255 * Validate if item related to received signal type is valid.
256 * It should never happen, excepted the situation when some
257 * piece of hardware is broken. In such situation just produce
258 * error message and return. Caller must continue to handle the
259 * signals from other devices if any.
260 */
261 if (unlikely(!item)) {
262 dev_err(priv->dev, "False signal: at offset:mask 0x%02x:0x%02x.\n",
263 item->reg, item->mask);
264
265 return;
266 }
267
268 /* Mask event. */
269 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
270 0);
271 if (ret)
272 goto out;
273
274 /* Read status. */
275 ret = regmap_read(priv->regmap, item->reg, ®val);
276 if (ret)
277 goto out;
278
279 /* Set asserted bits and save last status. */
280 regval &= item->mask;
281 asserted = item->cache ^ regval;
282 item->cache = regval;
283
284 for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
285 data = item->data + bit;
286 if (regval & BIT(bit)) {
287 if (item->inversed)
288 mlxreg_hotplug_device_destroy(priv, data);
289 else
290 mlxreg_hotplug_device_create(priv, data);
291 } else {
292 if (item->inversed)
293 mlxreg_hotplug_device_create(priv, data);
294 else
295 mlxreg_hotplug_device_destroy(priv, data);
296 }
297 }
298
299 /* Acknowledge event. */
300 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
301 0);
302 if (ret)
303 goto out;
304
305 /* Unmask event. */
306 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
307 item->mask);
308
309 out:
310 if (ret)
311 dev_err(priv->dev, "Failed to complete workqueue.\n");
312 }
313
314 static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data * priv,struct mlxreg_core_item * item)315 mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
316 struct mlxreg_core_item *item)
317 {
318 struct mlxreg_core_data *data = item->data;
319 u32 regval;
320 int i, ret = 0;
321
322 for (i = 0; i < item->count; i++, data++) {
323 /* Mask event. */
324 ret = regmap_write(priv->regmap, data->reg +
325 MLXREG_HOTPLUG_MASK_OFF, 0);
326 if (ret)
327 goto out;
328
329 /* Read status. */
330 ret = regmap_read(priv->regmap, data->reg, ®val);
331 if (ret)
332 goto out;
333
334 regval &= data->mask;
335
336 if (item->cache == regval)
337 goto ack_event;
338
339 /*
340 * ASIC health indication is provided through two bits. Bits
341 * value 0x2 indicates that ASIC reached the good health, value
342 * 0x0 indicates ASIC the bad health or dormant state and value
343 * 0x3 indicates the booting state. During ASIC reset it should
344 * pass the following states: dormant -> booting -> good.
345 */
346 if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
347 if (!data->attached) {
348 /*
349 * ASIC is in steady state. Connect associated
350 * device, if configured.
351 */
352 mlxreg_hotplug_device_create(priv, data);
353 data->attached = true;
354 }
355 } else {
356 if (data->attached) {
357 /*
358 * ASIC health is failed after ASIC has been
359 * in steady state. Disconnect associated
360 * device, if it has been connected.
361 */
362 mlxreg_hotplug_device_destroy(priv, data);
363 data->attached = false;
364 data->health_cntr = 0;
365 }
366 }
367 item->cache = regval;
368 ack_event:
369 /* Acknowledge event. */
370 ret = regmap_write(priv->regmap, data->reg +
371 MLXREG_HOTPLUG_EVENT_OFF, 0);
372 if (ret)
373 goto out;
374
375 /* Unmask event. */
376 ret = regmap_write(priv->regmap, data->reg +
377 MLXREG_HOTPLUG_MASK_OFF, data->mask);
378 if (ret)
379 goto out;
380 }
381
382 out:
383 if (ret)
384 dev_err(priv->dev, "Failed to complete workqueue.\n");
385 }
386
387 /*
388 * mlxreg_hotplug_work_handler - performs traversing of device interrupt
389 * registers according to the below hierarchy schema:
390 *
391 * Aggregation registers (status/mask)
392 * PSU registers: *---*
393 * *-----------------* | |
394 * |status/event/mask|-----> | * |
395 * *-----------------* | |
396 * Power registers: | |
397 * *-----------------* | |
398 * |status/event/mask|-----> | * |
399 * *-----------------* | |
400 * FAN registers: | |--> CPU
401 * *-----------------* | |
402 * |status/event/mask|-----> | * |
403 * *-----------------* | |
404 * ASIC registers: | |
405 * *-----------------* | |
406 * |status/event/mask|-----> | * |
407 * *-----------------* | |
408 * *---*
409 *
410 * In case some system changed are detected: FAN in/out, PSU in/out, power
411 * cable attached/detached, ASIC health good/bad, relevant device is created
412 * or destroyed.
413 */
mlxreg_hotplug_work_handler(struct work_struct * work)414 static void mlxreg_hotplug_work_handler(struct work_struct *work)
415 {
416 struct mlxreg_core_hotplug_platform_data *pdata;
417 struct mlxreg_hotplug_priv_data *priv;
418 struct mlxreg_core_item *item;
419 u32 regval, aggr_asserted;
420 unsigned long flags;
421 int i, ret;
422
423 priv = container_of(work, struct mlxreg_hotplug_priv_data,
424 dwork_irq.work);
425 pdata = dev_get_platdata(&priv->pdev->dev);
426 item = pdata->items;
427
428 /* Mask aggregation event. */
429 ret = regmap_write(priv->regmap, pdata->cell +
430 MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
431 if (ret < 0)
432 goto out;
433
434 /* Read aggregation status. */
435 ret = regmap_read(priv->regmap, pdata->cell, ®val);
436 if (ret)
437 goto out;
438
439 regval &= pdata->mask;
440 aggr_asserted = priv->aggr_cache ^ regval;
441 priv->aggr_cache = regval;
442
443 /*
444 * Handler is invoked, but no assertion is detected at top aggregation
445 * status level. Set aggr_asserted to mask value to allow handler extra
446 * run over all relevant signals to recover any missed signal.
447 */
448 if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
449 priv->not_asserted = 0;
450 aggr_asserted = pdata->mask;
451 }
452 if (!aggr_asserted)
453 goto unmask_event;
454
455 /* Handle topology and health configuration changes. */
456 for (i = 0; i < pdata->counter; i++, item++) {
457 if (aggr_asserted & item->aggr_mask) {
458 if (item->health)
459 mlxreg_hotplug_health_work_helper(priv, item);
460 else
461 mlxreg_hotplug_work_helper(priv, item);
462 }
463 }
464
465 spin_lock_irqsave(&priv->lock, flags);
466
467 /*
468 * It is possible, that some signals have been inserted, while
469 * interrupt has been masked by mlxreg_hotplug_work_handler. In this
470 * case such signals will be missed. In order to handle these signals
471 * delayed work is canceled and work task re-scheduled for immediate
472 * execution. It allows to handle missed signals, if any. In other case
473 * work handler just validates that no new signals have been received
474 * during masking.
475 */
476 cancel_delayed_work(&priv->dwork_irq);
477 schedule_delayed_work(&priv->dwork_irq, 0);
478
479 spin_unlock_irqrestore(&priv->lock, flags);
480
481 return;
482
483 unmask_event:
484 priv->not_asserted++;
485 /* Unmask aggregation event (no need acknowledge). */
486 ret = regmap_write(priv->regmap, pdata->cell +
487 MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
488
489 out:
490 if (ret)
491 dev_err(priv->dev, "Failed to complete workqueue.\n");
492 }
493
mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data * priv)494 static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
495 {
496 struct mlxreg_core_hotplug_platform_data *pdata;
497 struct mlxreg_core_item *item;
498 int i, ret;
499
500 pdata = dev_get_platdata(&priv->pdev->dev);
501 item = pdata->items;
502
503 for (i = 0; i < pdata->counter; i++, item++) {
504 /* Clear group presense event. */
505 ret = regmap_write(priv->regmap, item->reg +
506 MLXREG_HOTPLUG_EVENT_OFF, 0);
507 if (ret)
508 goto out;
509
510 /* Set group initial status as mask and unmask group event. */
511 if (item->inversed) {
512 item->cache = item->mask;
513 ret = regmap_write(priv->regmap, item->reg +
514 MLXREG_HOTPLUG_MASK_OFF,
515 item->mask);
516 if (ret)
517 goto out;
518 }
519 }
520
521 /* Keep aggregation initial status as zero and unmask events. */
522 ret = regmap_write(priv->regmap, pdata->cell +
523 MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
524 if (ret)
525 goto out;
526
527 /* Keep low aggregation initial status as zero and unmask events. */
528 if (pdata->cell_low) {
529 ret = regmap_write(priv->regmap, pdata->cell_low +
530 MLXREG_HOTPLUG_AGGR_MASK_OFF,
531 pdata->mask_low);
532 if (ret)
533 goto out;
534 }
535
536 /* Invoke work handler for initializing hot plug devices setting. */
537 mlxreg_hotplug_work_handler(&priv->dwork_irq.work);
538
539 out:
540 if (ret)
541 dev_err(priv->dev, "Failed to set interrupts.\n");
542 enable_irq(priv->irq);
543 return ret;
544 }
545
mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data * priv)546 static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
547 {
548 struct mlxreg_core_hotplug_platform_data *pdata;
549 struct mlxreg_core_item *item;
550 struct mlxreg_core_data *data;
551 int count, i, j;
552
553 pdata = dev_get_platdata(&priv->pdev->dev);
554 item = pdata->items;
555 disable_irq(priv->irq);
556 cancel_delayed_work_sync(&priv->dwork_irq);
557
558 /* Mask low aggregation event, if defined. */
559 if (pdata->cell_low)
560 regmap_write(priv->regmap, pdata->cell_low +
561 MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
562
563 /* Mask aggregation event. */
564 regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
565 0);
566
567 /* Clear topology configurations. */
568 for (i = 0; i < pdata->counter; i++, item++) {
569 data = item->data;
570 /* Mask group presense event. */
571 regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
572 0);
573 /* Clear group presense event. */
574 regmap_write(priv->regmap, data->reg +
575 MLXREG_HOTPLUG_EVENT_OFF, 0);
576
577 /* Remove all the attached devices in group. */
578 count = item->count;
579 for (j = 0; j < count; j++, data++)
580 mlxreg_hotplug_device_destroy(priv, data);
581 }
582 }
583
mlxreg_hotplug_irq_handler(int irq,void * dev)584 static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
585 {
586 struct mlxreg_hotplug_priv_data *priv;
587
588 priv = (struct mlxreg_hotplug_priv_data *)dev;
589
590 /* Schedule work task for immediate execution.*/
591 schedule_delayed_work(&priv->dwork_irq, 0);
592
593 return IRQ_HANDLED;
594 }
595
mlxreg_hotplug_probe(struct platform_device * pdev)596 static int mlxreg_hotplug_probe(struct platform_device *pdev)
597 {
598 struct mlxreg_core_hotplug_platform_data *pdata;
599 struct mlxreg_hotplug_priv_data *priv;
600 struct i2c_adapter *deferred_adap;
601 int err;
602
603 pdata = dev_get_platdata(&pdev->dev);
604 if (!pdata) {
605 dev_err(&pdev->dev, "Failed to get platform data.\n");
606 return -EINVAL;
607 }
608
609 /* Defer probing if the necessary adapter is not configured yet. */
610 deferred_adap = i2c_get_adapter(pdata->deferred_nr);
611 if (!deferred_adap)
612 return -EPROBE_DEFER;
613 i2c_put_adapter(deferred_adap);
614
615 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
616 if (!priv)
617 return -ENOMEM;
618
619 if (pdata->irq) {
620 priv->irq = pdata->irq;
621 } else {
622 priv->irq = platform_get_irq(pdev, 0);
623 if (priv->irq < 0) {
624 dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
625 priv->irq);
626 return priv->irq;
627 }
628 }
629
630 priv->regmap = pdata->regmap;
631 priv->dev = pdev->dev.parent;
632 priv->pdev = pdev;
633
634 err = devm_request_irq(&pdev->dev, priv->irq,
635 mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
636 | IRQF_SHARED, "mlxreg-hotplug", priv);
637 if (err) {
638 dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
639 return err;
640 }
641
642 disable_irq(priv->irq);
643 spin_lock_init(&priv->lock);
644 INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
645 dev_set_drvdata(&pdev->dev, priv);
646
647 err = mlxreg_hotplug_attr_init(priv);
648 if (err) {
649 dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
650 err);
651 return err;
652 }
653
654 priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
655 "mlxreg_hotplug", priv, priv->groups);
656 if (IS_ERR(priv->hwmon)) {
657 dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
658 PTR_ERR(priv->hwmon));
659 return PTR_ERR(priv->hwmon);
660 }
661
662 /* Perform initial interrupts setup. */
663 mlxreg_hotplug_set_irq(priv);
664 priv->after_probe = true;
665
666 return 0;
667 }
668
/*
 * mlxreg_hotplug_remove - driver remove: undo the interrupt setup done at
 * probe time and destroy all attached hotplug devices. All memory is
 * devm-managed, so no explicit frees are needed here.
 */
static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

	/* Clean interrupts setup. */
	mlxreg_hotplug_unset_irq(priv);

	return 0;
}
678
/* Platform driver registration; the device is instantiated by the matching
 * mlx-platform code via platform data (no OF/ACPI matching here).
 */
static struct platform_driver mlxreg_hotplug_driver = {
	.driver = {
		.name = "mlxreg-hotplug",
	},
	.probe = mlxreg_hotplug_probe,
	.remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");
693