treewide: devm_kzalloc() -> devm_kcalloc()
drivers/platform/mellanox/mlxreg-hotplug.c
/*
 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>

/* Offset of event and mask registers from status register. */
#define MLXREG_HOTPLUG_EVENT_OFF        1
#define MLXREG_HOTPLUG_MASK_OFF         2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF    1

/* ASIC health parameters. */
#define MLXREG_HOTPLUG_HEALTH_MASK      0x02
#define MLXREG_HOTPLUG_RST_CNTR         3

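/* Maximum number of hotplug device attributes exposed via sysfs. */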
#define MLXREG_HOTPLUG_ATTRS_MAX        24

/**
 * struct mlxreg_hotplug_priv_data - platform private data:
 * @irq: platform device interrupt number;
 * @dev: basic device;
 * @pdev: platform device;
 * @plat: platform data;
 * @regmap: register map handle;
 * @dwork_irq: delayed work template for interrupt handling;
 * @dwork: delayed work template;
 * @lock: spin lock;
 * @hwmon: hwmon device;
 * @mlxreg_hotplug_attr: sysfs attributes array;
 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
 * @group: sysfs attribute group;
 * @groups: list of sysfs attribute group for hwmon registration;
 * @cell: location of top aggregation interrupt register;
 * @mask: top aggregation interrupt common mask;
 * @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indicating that probing has completed;
 */
struct mlxreg_hotplug_priv_data {
        int irq;
        struct device *dev;
        struct platform_device *pdev;
        struct mlxreg_hotplug_platform_data *plat;
        struct regmap *regmap;
        struct delayed_work dwork_irq;
        struct delayed_work dwork;
        spinlock_t lock; /* sync with interrupt */
        struct device *hwmon;
        struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
        struct sensor_device_attribute_2
                        mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
        struct attribute_group group;
        const struct attribute_group *groups[2];
        u32 cell;
        u32 mask;
        u32 aggr_cache;
        bool after_probe;
};

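/*
 * Create the I2C device for a hotplugged unit: take a reference on the
 * adapter the unit sits on and instantiate the client described by the
 * unit board info.
 */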
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
                                        struct mlxreg_core_data *data)
{
        struct mlxreg_core_hotplug_platform_data *pdata;

        /*
         * Return if the adapter number is negative. This happens when the
         * hotplug event is not associated with a hotplug device.
         */
        if (data->hpdev.nr < 0)
                return 0;

        pdata = dev_get_platdata(&priv->pdev->dev);
        data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
                                              pdata->shift_nr);
        if (!data->hpdev.adapter) {
                dev_err(priv->dev, "Failed to get adapter for bus %d\n",
                        data->hpdev.nr + pdata->shift_nr);
                return -EFAULT;
        }

        data->hpdev.client = i2c_new_device(data->hpdev.adapter,
                                            data->hpdev.brdinfo);
        if (!data->hpdev.client) {
                dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
                        data->hpdev.brdinfo->type, data->hpdev.nr +
                        pdata->shift_nr, data->hpdev.brdinfo->addr);

                i2c_put_adapter(data->hpdev.adapter);
                data->hpdev.adapter = NULL;
                return -EFAULT;
        }

        return 0;
}

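/* Unregister the I2C client of a removed unit and release its adapter. */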
static void mlxreg_hotplug_device_destroy(struct mlxreg_core_data *data)
{
        if (data->hpdev.client) {
                i2c_unregister_device(data->hpdev.client);
                data->hpdev.client = NULL;
        }

        if (data->hpdev.adapter) {
                i2c_put_adapter(data->hpdev.adapter);
                data->hpdev.adapter = NULL;
        }
}

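/*
 * sysfs show callback: read the unit status register and report either the
 * masked health value (for health items) or the presence state (0/1).
 */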
static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
        struct mlxreg_core_hotplug_platform_data *pdata;
        int index = to_sensor_dev_attr_2(attr)->index;
        int nr = to_sensor_dev_attr_2(attr)->nr;
        struct mlxreg_core_item *item;
        struct mlxreg_core_data *data;
        u32 regval;
        int ret;

        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items + nr;
        data = item->data + index;

        ret = regmap_read(priv->regmap, data->reg, &regval);
        if (ret)
                return ret;

        if (item->health) {
                regval &= data->mask;
        } else {
                /* Bit = 0 : functional if item->inversed is true. */
                if (item->inversed)
                        regval = !(regval & data->mask);
                else
                        regval = !!(regval & data->mask);
        }

        return sprintf(buf, "%u\n", regval);
}

#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]

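/*
 * Create one read-only sysfs attribute per unit, over all items
 * (psu, pwr, fan, asic), and attach them to the group used for hwmon
 * device registration.
 */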
static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_core_item *item;
        struct mlxreg_core_data *data;
        int num_attrs = 0, id = 0, i, j;

        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items;

        /* Go over all kinds of items - psu, pwr, fan. */
        for (i = 0; i < pdata->counter; i++, item++) {
                num_attrs += item->count;
                data = item->data;
                /* Go over all units within the item. */
                for (j = 0; j < item->count; j++, data++, id++) {
                        PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
                        PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
                                                             GFP_KERNEL, "%s",
                                                             data->label);

                        if (!PRIV_ATTR(id)->name) {
                                dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
                                        id);
                                return -ENOMEM;
                        }

                        PRIV_DEV_ATTR(id).dev_attr.attr.name =
                                                        PRIV_ATTR(id)->name;
                        PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
                        PRIV_DEV_ATTR(id).dev_attr.show =
                                                mlxreg_hotplug_attr_show;
                        PRIV_DEV_ATTR(id).nr = i;
                        PRIV_DEV_ATTR(id).index = j;
                        sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
                }
        }

        /* Allocate a NULL-terminated copy of the attribute pointers array. */
        priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
                                         num_attrs + 1,
                                         sizeof(struct attribute *),
                                         GFP_KERNEL);
        if (!priv->group.attrs)
                return -ENOMEM;

        memcpy(priv->group.attrs, priv->mlxreg_hotplug_attr,
               num_attrs * sizeof(struct attribute *));
        priv->groups[0] = &priv->group;
        priv->groups[1] = NULL;

        return 0;
}

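/*
 * Handle a signalled change for one item group: compare the current status
 * register against the cached value and create or destroy the corresponding
 * hotplug devices, while keeping the group event masked.
 */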
static void
mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
                           struct mlxreg_core_item *item)
{
        struct mlxreg_core_data *data;
        u32 asserted, regval, bit;
        int ret;

        /*
         * Validate that the item related to the received signal type is
         * valid. This should never happen, except when some piece of
         * hardware is broken. In such a case just produce an error message
         * and return. The caller must continue to handle the signals from
         * other devices, if any.
         */
        if (unlikely(!item)) {
                dev_err(priv->dev, "False signal received.\n");

                return;
        }

        /* Mask event. */
        ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
                           0);
        if (ret)
                goto out;

        /* Read status. */
        ret = regmap_read(priv->regmap, item->reg, &regval);
        if (ret)
                goto out;

        /* Set asserted bits and save last status. */
        regval &= item->mask;
        asserted = item->cache ^ regval;
        item->cache = regval;

        for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
                data = item->data + bit;
                if (regval & BIT(bit)) {
                        if (item->inversed)
                                mlxreg_hotplug_device_destroy(data);
                        else
                                mlxreg_hotplug_device_create(priv, data);
                } else {
                        if (item->inversed)
                                mlxreg_hotplug_device_create(priv, data);
                        else
                                mlxreg_hotplug_device_destroy(data);
                }
        }

        /* Acknowledge event. */
        ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
                           0);
        if (ret)
                goto out;

        /* Unmask event. */
        ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
                           item->mask);

 out:
        if (ret)
                dev_err(priv->dev, "Failed to complete workqueue.\n");
}

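/*
 * Handle ASIC health events: create the related device when a good health
 * value is read (right away during probe, otherwise once the health counter
 * reaches MLXREG_HOTPLUG_RST_CNTR), and destroy it when good health is lost.
 */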
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
                                  struct mlxreg_core_item *item)
{
        struct mlxreg_core_data *data = item->data;
        u32 regval;
        int i, ret = 0;

        for (i = 0; i < item->count; i++, data++) {
                /* Mask event. */
                ret = regmap_write(priv->regmap, data->reg +
                                   MLXREG_HOTPLUG_MASK_OFF, 0);
                if (ret)
                        goto out;

                /* Read status. */
                ret = regmap_read(priv->regmap, data->reg, &regval);
                if (ret)
                        goto out;

                regval &= data->mask;
                item->cache = regval;
                if (regval == MLXREG_HOTPLUG_HEALTH_MASK) {
                        if ((data->health_cntr++ == MLXREG_HOTPLUG_RST_CNTR) ||
                            !priv->after_probe) {
                                mlxreg_hotplug_device_create(priv, data);
                                data->attached = true;
                        }
                } else {
                        if (data->attached) {
                                mlxreg_hotplug_device_destroy(data);
                                data->attached = false;
                                data->health_cntr = 0;
                        }
                }

                /* Acknowledge event. */
                ret = regmap_write(priv->regmap, data->reg +
                                   MLXREG_HOTPLUG_EVENT_OFF, 0);
                if (ret)
                        goto out;

                /* Unmask event. */
                ret = regmap_write(priv->regmap, data->reg +
                                   MLXREG_HOTPLUG_MASK_OFF, data->mask);
                if (ret)
                        goto out;
        }

 out:
        if (ret)
                dev_err(priv->dev, "Failed to complete workqueue.\n");
}

/*
 * mlxreg_hotplug_work_handler - traverses the device interrupt registers
 * according to the below hierarchy schema:
 *
 *                              Aggregation registers (status/mask)
 * PSU registers:               *---*
 * *-----------------*          |   |
 * |status/event/mask|----->    | * |
 * *-----------------*          |   |
 * Power registers:             |   |
 * *-----------------*          |   |
 * |status/event/mask|----->    | * |
 * *-----------------*          |   |
 * FAN registers:               |   |--> CPU
 * *-----------------*          |   |
 * |status/event/mask|----->    | * |
 * *-----------------*          |   |
 * ASIC registers:              |   |
 * *-----------------*          |   |
 * |status/event/mask|----->    | * |
 * *-----------------*          |   |
 *                              *---*
 *
 * In case some system changes are detected: FAN in/out, PSU in/out, power
 * cable attached/detached, ASIC health good/bad, the relevant device is
 * created or destroyed.
 */
static void mlxreg_hotplug_work_handler(struct work_struct *work)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_hotplug_priv_data *priv;
        struct mlxreg_core_item *item;
        u32 regval, aggr_asserted;
        unsigned long flags;
        int i, ret;

        priv = container_of(work, struct mlxreg_hotplug_priv_data,
                            dwork_irq.work);
        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items;

        /* Mask aggregation event. */
        ret = regmap_write(priv->regmap, pdata->cell +
                           MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
        if (ret < 0)
                goto out;

        /* Read aggregation status. */
        ret = regmap_read(priv->regmap, pdata->cell, &regval);
        if (ret)
                goto out;

        regval &= pdata->mask;
        aggr_asserted = priv->aggr_cache ^ regval;
        priv->aggr_cache = regval;

        /* Handle topology and health configuration changes. */
        for (i = 0; i < pdata->counter; i++, item++) {
                if (aggr_asserted & item->aggr_mask) {
                        if (item->health)
                                mlxreg_hotplug_health_work_helper(priv, item);
                        else
                                mlxreg_hotplug_work_helper(priv, item);
                }
        }

        if (aggr_asserted) {
                spin_lock_irqsave(&priv->lock, flags);

                /*
                 * It is possible that some signals have been inserted while
                 * the interrupt has been masked by
                 * mlxreg_hotplug_work_handler. In this case such signals
                 * will be missed. In order to handle these signals the
                 * delayed work is canceled and the work task is re-scheduled
                 * for immediate execution. This allows handling the missed
                 * signals, if any. Otherwise the work handler just validates
                 * that no new signals have been received during masking.
                 */
                cancel_delayed_work(&priv->dwork_irq);
                schedule_delayed_work(&priv->dwork_irq, 0);

                spin_unlock_irqrestore(&priv->lock, flags);

                return;
        }

        /* Unmask aggregation event (no need to acknowledge). */
        ret = regmap_write(priv->regmap, pdata->cell +
                           MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);

 out:
        if (ret)
                dev_err(priv->dev, "Failed to complete workqueue.\n");
}

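/*
 * Perform initial interrupt setup: clear pending group events, program the
 * group and aggregation masks, run the work handler once to set up already
 * present devices and finally enable the IRQ line.
 */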
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_core_item *item;
        int i, ret;

        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items;

        for (i = 0; i < pdata->counter; i++, item++) {
                /* Clear group presence event. */
                ret = regmap_write(priv->regmap, item->reg +
                                   MLXREG_HOTPLUG_EVENT_OFF, 0);
                if (ret)
                        goto out;

                /* Set group initial status as mask and unmask group event. */
                if (item->inversed) {
                        item->cache = item->mask;
                        ret = regmap_write(priv->regmap, item->reg +
                                           MLXREG_HOTPLUG_MASK_OFF,
                                           item->mask);
                        if (ret)
                                goto out;
                }
        }

        /* Keep aggregation initial status as zero and unmask events. */
        ret = regmap_write(priv->regmap, pdata->cell +
                           MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
        if (ret)
                goto out;

        /* Keep low aggregation initial status as zero and unmask events. */
        if (pdata->cell_low) {
                ret = regmap_write(priv->regmap, pdata->cell_low +
                                   MLXREG_HOTPLUG_AGGR_MASK_OFF,
                                   pdata->mask_low);
                if (ret)
                        goto out;
        }

        /* Invoke the work handler for initial hotplug devices setup. */
        mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

 out:
        if (ret)
                dev_err(priv->dev, "Failed to set interrupts.\n");
        enable_irq(priv->irq);
        return ret;
}

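/*
 * Tear down the interrupt setup: disable the IRQ line, flush the delayed
 * work, mask aggregation and group events and destroy the attached devices.
 */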
static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_core_item *item;
        struct mlxreg_core_data *data;
        int count, i, j;

        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items;
        disable_irq(priv->irq);
        cancel_delayed_work_sync(&priv->dwork_irq);

        /* Mask low aggregation event, if defined. */
        if (pdata->cell_low)
                regmap_write(priv->regmap, pdata->cell_low +
                             MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

        /* Mask aggregation event. */
        regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
                     0);

        /* Clear topology configurations. */
        for (i = 0; i < pdata->counter; i++, item++) {
                data = item->data;
                /* Mask group presence event. */
                regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
                             0);
                /* Clear group presence event. */
                regmap_write(priv->regmap, data->reg +
                             MLXREG_HOTPLUG_EVENT_OFF, 0);

                /* Remove all the attached devices in the group. */
                count = item->count;
                for (j = 0; j < count; j++, data++)
                        mlxreg_hotplug_device_destroy(data);
        }
}

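/* Hard IRQ handler: defer the actual handling to the delayed work. */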
static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
{
        struct mlxreg_hotplug_priv_data *priv;

        priv = (struct mlxreg_hotplug_priv_data *)dev;

        /* Schedule work task for immediate execution. */
        schedule_delayed_work(&priv->dwork_irq, 0);

        return IRQ_HANDLED;
}

static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_hotplug_priv_data *priv;
        struct i2c_adapter *deferred_adap;
        int err;

        pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "Failed to get platform data.\n");
                return -EINVAL;
        }

        /* Defer probing if the necessary adapter is not configured yet. */
        deferred_adap = i2c_get_adapter(pdata->deferred_nr);
        if (!deferred_adap)
                return -EPROBE_DEFER;
        i2c_put_adapter(deferred_adap);

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        if (pdata->irq) {
                priv->irq = pdata->irq;
        } else {
                priv->irq = platform_get_irq(pdev, 0);
                if (priv->irq < 0) {
                        dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
                                priv->irq);
                        return priv->irq;
                }
        }

        priv->regmap = pdata->regmap;
        priv->dev = pdev->dev.parent;
        priv->pdev = pdev;

        err = devm_request_irq(&pdev->dev, priv->irq,
                               mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
                               | IRQF_SHARED, "mlxreg-hotplug", priv);
        if (err) {
                dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
                return err;
        }

        disable_irq(priv->irq);
        spin_lock_init(&priv->lock);
        INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
        /* Perform initial interrupt setup. */
        mlxreg_hotplug_set_irq(priv);

        priv->after_probe = true;
        dev_set_drvdata(&pdev->dev, priv);

        err = mlxreg_hotplug_attr_init(priv);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
                        err);
                return err;
        }

        priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
                                        "mlxreg_hotplug", priv, priv->groups);
        if (IS_ERR(priv->hwmon)) {
                dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
                        PTR_ERR(priv->hwmon));
                return PTR_ERR(priv->hwmon);
        }

        return 0;
}

static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
        struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

        /* Clean up the interrupt setup. */
        mlxreg_hotplug_unset_irq(priv);

        return 0;
}

static struct platform_driver mlxreg_hotplug_driver = {
        .driver = {
                .name = "mlxreg-hotplug",
        },
        .probe = mlxreg_hotplug_probe,
        .remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");