drivers/net/ethernet/amd/pds_core/core.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "core.h"

static BLOCKING_NOTIFIER_HEAD(pds_notify_chain);

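/* pds_notify_chain is a blocking notifier chain used to broadcast core
 * device events, such as reset notifications, to registered listeners.
 */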
int pdsc_register_notify(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_register_notify);

void pdsc_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_unregister_notify);

void pdsc_notify(unsigned long event, void *data)
{
	blocking_notifier_call_chain(&pds_notify_chain, event, data);
}

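/* Release a previously allocated interrupt slot: mask and clean the
 * device-side interrupt control, free the OS IRQ, and clear the slot.
 */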
void pdsc_intr_free(struct pdsc *pdsc, int index)
{
	struct pdsc_intr_info *intr_info;

	if (index >= pdsc->nintrs || index < 0) {
		WARN(true, "bad intr index %d\n", index);
		return;
	}

	intr_info = &pdsc->intr_info[index];
	if (!intr_info->vector)
		return;
	dev_dbg(pdsc->dev, "%s: idx %d vec %d name %s\n",
		__func__, index, intr_info->vector, intr_info->name);

	pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
	pds_core_intr_clean(&pdsc->intr_ctrl[index]);

	free_irq(intr_info->vector, intr_info->data);

	memset(intr_info, 0, sizeof(*intr_info));
}

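/* Claim the first free slot in intr_info[], look up the OS vector for
 * that index with pci_irq_vector(), start with the interrupt masked,
 * and register the handler.  Returns the slot index on success or a
 * negative errno.
 */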
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
		    irq_handler_t handler, void *data)
{
	struct pdsc_intr_info *intr_info;
	unsigned int index;
	int err;

	/* Find the first available interrupt */
	for (index = 0; index < pdsc->nintrs; index++)
		if (!pdsc->intr_info[index].vector)
			break;
	if (index >= pdsc->nintrs) {
		dev_warn(pdsc->dev, "%s: no intr, index=%d nintrs=%d\n",
			 __func__, index, pdsc->nintrs);
		return -ENOSPC;
	}

	pds_core_intr_clean_flags(&pdsc->intr_ctrl[index],
				  PDS_CORE_INTR_CRED_RESET_COALESCE);

	intr_info = &pdsc->intr_info[index];

	intr_info->index = index;
	intr_info->data = data;
	strscpy(intr_info->name, name, sizeof(intr_info->name));

	/* Get the OS vector number for the interrupt */
	err = pci_irq_vector(pdsc->pdev, index);
	if (err < 0) {
		dev_err(pdsc->dev, "failed to get intr vector index %d: %pe\n",
			index, ERR_PTR(err));
		goto err_out_free_intr;
	}
	intr_info->vector = err;

	/* Init the device's intr mask */
	pds_core_intr_clean(&pdsc->intr_ctrl[index]);
	pds_core_intr_mask_assert(&pdsc->intr_ctrl[index], 1);
	pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);

	/* Register the isr with a name */
	err = request_irq(intr_info->vector, handler, 0, intr_info->name, data);
	if (err) {
		dev_err(pdsc->dev, "failed to get intr irq vector %d: %pe\n",
			intr_info->vector, ERR_PTR(err));
		goto err_out_free_intr;
	}

	return index;

err_out_free_intr:
	pdsc_intr_free(pdsc, index);
	return err;
}

static void pdsc_qcq_intr_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
	if (!(qcq->flags & PDS_CORE_QCQ_F_INTR) ||
	    qcq->intx == PDS_CORE_INTR_INDEX_NOT_ASSIGNED)
		return;

	pdsc_intr_free(pdsc, qcq->intx);
	qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
}

static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
	char name[PDSC_INTR_NAME_MAX_SZ];
	int index;

	if (!(qcq->flags & PDS_CORE_QCQ_F_INTR)) {
		qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	snprintf(name, sizeof(name), "%s-%d-%s",
		 PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
	index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
	if (index < 0)
		return index;
	qcq->intx = index;
	qcq->cq.bound_intr = &pdsc->intr_info[index];

	return 0;
}

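/* Tear down a queue/completion-queue pair: drop its debugfs entry,
 * free its interrupt, release the DMA descriptor rings, and free the
 * info arrays before zeroing the qcq struct.
 */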
void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
	struct device *dev = pdsc->dev;

	if (!(qcq && qcq->pdsc))
		return;

	pdsc_debugfs_del_qcq(qcq);

	pdsc_qcq_intr_free(pdsc, qcq);

	if (qcq->q_base)
		dma_free_coherent(dev, qcq->q_size,
				  qcq->q_base, qcq->q_base_pa);

	if (qcq->cq_base)
		dma_free_coherent(dev, qcq->cq_size,
				  qcq->cq_base, qcq->cq_base_pa);

	vfree(qcq->cq.info);
	vfree(qcq->q.info);

	memset(qcq, 0, sizeof(*qcq));
}

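/* Point each queue/cq info entry at its descriptor within the
 * DMA-coherent ring that was just allocated.
 */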
static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
{
	struct pdsc_q_info *cur;
	unsigned int i;

	q->base = base;
	q->base_pa = base_pa;

	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
		cur->desc = base + (i * q->desc_size);
}

static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
{
	struct pdsc_cq_info *cur;
	unsigned int i;

	cq->base = base;
	cq->base_pa = base_pa;

	for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
		cur->comp = base + (i * cq->desc_size);
}

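/* Allocate and initialize a queue/completion-queue pair: info arrays,
 * optional interrupt, and page-aligned DMA descriptor rings.  For a
 * notifyq the q and cq share one contiguous allocation; otherwise each
 * gets its own ring.
 */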
int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
		   const char *name, unsigned int flags, unsigned int num_descs,
		   unsigned int desc_size, unsigned int cq_desc_size,
		   unsigned int pid, struct pdsc_qcq *qcq)
{
	struct device *dev = pdsc->dev;
	void *q_base, *cq_base;
	dma_addr_t cq_base_pa;
	dma_addr_t q_base_pa;
	int err;

	qcq->q.info = vcalloc(num_descs, sizeof(*qcq->q.info));
	if (!qcq->q.info) {
		err = -ENOMEM;
		goto err_out;
	}

	qcq->pdsc = pdsc;
	qcq->flags = flags;
	INIT_WORK(&qcq->work, pdsc_work_thread);

	qcq->q.type = type;
	qcq->q.index = index;
	qcq->q.num_descs = num_descs;
	qcq->q.desc_size = desc_size;
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->q.pid = pid;
	snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);

	err = pdsc_qcq_intr_alloc(pdsc, qcq);
	if (err)
		goto err_out_free_q_info;

	qcq->cq.info = vcalloc(num_descs, sizeof(*qcq->cq.info));
	if (!qcq->cq.info) {
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	qcq->cq.num_descs = num_descs;
	qcq->cq.desc_size = cq_desc_size;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;

	if (flags & PDS_CORE_QCQ_F_NOTIFYQ) {
		/* q & cq need to be contiguous in case of notifyq */
		qcq->q_size = PDS_PAGE_SIZE +
			      ALIGN(num_descs * desc_size, PDS_PAGE_SIZE) +
			      ALIGN(num_descs * cq_desc_size, PDS_PAGE_SIZE);
		qcq->q_base = dma_alloc_coherent(dev,
						 qcq->q_size + qcq->cq_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base +
				    ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				    PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->q_base_pa +
				   ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				   PDS_PAGE_SIZE);

	} else {
		/* q DMA descriptors */
		qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);
		qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		/* cq DMA descriptors */
		qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);
		qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
						  &qcq->cq_base_pa,
						  GFP_KERNEL);
		if (!qcq->cq_base) {
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(qcq->cq_base, PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->cq_base_pa, PDS_PAGE_SIZE);
	}

	pdsc_cq_map(&qcq->cq, cq_base, cq_base_pa);
	qcq->cq.bound_q = &qcq->q;

	pdsc_debugfs_add_qcq(pdsc, qcq);

	return 0;

err_out_free_q:
	dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
err_out_free_cq_info:
	vfree(qcq->cq.info);
err_out_free_irq:
	pdsc_qcq_intr_free(pdsc, qcq);
err_out_free_q_info:
	vfree(qcq->q.info);
	memset(qcq, 0, sizeof(*qcq));
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static void pdsc_core_uninit(struct pdsc *pdsc)
{
	pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
	pdsc_qcq_free(pdsc, &pdsc->adminqcq);

	if (pdsc->kern_dbpage) {
		iounmap(pdsc->kern_dbpage);
		pdsc->kern_dbpage = NULL;
	}
}

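/* Set up the core device: allocate the adminq and notifyq, push their
 * base addresses, ring sizes, and interrupt index to the device with
 * the INIT devcmd, and map the kernel doorbell page.
 */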
static int pdsc_core_init(struct pdsc *pdsc)
{
	union pds_core_dev_comp comp = {};
	union pds_core_dev_cmd cmd = {
		.init.opcode = PDS_CORE_CMD_INIT,
	};
	struct pds_core_dev_init_data_out cido;
	struct pds_core_dev_init_data_in cidi;
	u32 dbid_count;
	u32 dbpage_num;
	int numdescs;
	size_t sz;
	int err;

	/* Scale the descriptor ring length based on number of CPUs and VFs */
	numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
	numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
	numdescs = roundup_pow_of_two(numdescs);
	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
			     PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
			     numdescs,
			     sizeof(union pds_core_adminq_cmd),
			     sizeof(union pds_core_adminq_comp),
			     0, &pdsc->adminqcq);
	if (err)
		return err;

	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
			     PDS_CORE_QCQ_F_NOTIFYQ,
			     PDSC_NOTIFYQ_LENGTH,
			     sizeof(struct pds_core_notifyq_cmd),
			     sizeof(union pds_core_notifyq_comp),
			     0, &pdsc->notifyqcq);
	if (err)
		goto err_out_uninit;

	cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
	cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
	cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
	cidi.flags = cpu_to_le32(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA);
	cidi.intr_index = cpu_to_le16(pdsc->adminqcq.intx);
	cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
	cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);

	mutex_lock(&pdsc->devcmd_lock);

	sz = min_t(size_t, sizeof(cidi), sizeof(pdsc->cmd_regs->data));
	memcpy_toio(&pdsc->cmd_regs->data, &cidi, sz);

	err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
	if (!err) {
		sz = min_t(size_t, sizeof(cido), sizeof(pdsc->cmd_regs->data));
		memcpy_fromio(&cido, &pdsc->cmd_regs->data, sz);
	}

	mutex_unlock(&pdsc->devcmd_lock);
	if (err) {
		dev_err(pdsc->dev, "Device init command failed: %pe\n",
			ERR_PTR(err));
		goto err_out_uninit;
	}

	pdsc->hw_index = le32_to_cpu(cido.core_hw_index);

	dbid_count = le32_to_cpu(pdsc->dev_ident.ndbpgs_per_lif);
	dbpage_num = pdsc->hw_index * dbid_count;
	pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
	if (!pdsc->kern_dbpage) {
		dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_uninit;
	}

	pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
	pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
	pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);

	pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
	pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
	pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);

	pdsc->last_eid = 0;

	return 0;

err_out_uninit:
	pdsc_core_uninit(pdsc);
	return err;
}

static struct pdsc_viftype pdsc_viftype_defaults[] = {
	[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
				.vif_id = PDS_DEV_TYPE_VDPA,
				.dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
	[PDS_DEV_TYPE_MAX] = {}
};

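/* Build the VIF status table from the defaults above and mark each
 * entry according to what the device identity says it supports.
 */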
static int pdsc_viftypes_init(struct pdsc *pdsc)
{
	enum pds_core_vif_types vt;

	pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
				       GFP_KERNEL);
	if (!pdsc->viftype_status)
		return -ENOMEM;

	for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
		bool vt_support;

		if (!pdsc_viftype_defaults[vt].name)
			continue;

		/* Grab the defaults */
		pdsc->viftype_status[vt] = pdsc_viftype_defaults[vt];

		/* See what the Core device has for support */
		vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
		dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
			pdsc->viftype_status[vt].name,
			vt_support ? "" : "not ");

		pdsc->viftype_status[vt].supported = vt_support;
	}

	return 0;
}

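/* Bring the device up to the point where the adminq is usable.  When
 * "init" is true this is first-time setup and the VIF support table is
 * also created; on recovery the existing table is kept.
 */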
int pdsc_setup(struct pdsc *pdsc, bool init)
{
	int err;

	err = pdsc_dev_init(pdsc);
	if (err)
		return err;

	/* Set up the Core with the AdminQ and NotifyQ info */
	err = pdsc_core_init(pdsc);
	if (err)
		goto err_out_teardown;

	/* Set up the VIFs */
	if (init) {
		err = pdsc_viftypes_init(pdsc);
		if (err)
			goto err_out_teardown;

		pdsc_debugfs_add_viftype(pdsc);
	}

	refcount_set(&pdsc->adminq_refcnt, 1);
	clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
	return 0;

err_out_teardown:
	pdsc_teardown(pdsc, init);
	return err;
}

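/* Undo pdsc_setup(): reset the device (PF only), stop the adminq work,
 * free the queues, and release the dev resources.  The VIF table is
 * freed only when "removing" is set.
 */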
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
	if (!pdsc->pdev->is_virtfn)
		pdsc_devcmd_reset(pdsc);
	if (pdsc->adminqcq.work.func)
		cancel_work_sync(&pdsc->adminqcq.work);

	pdsc_core_uninit(pdsc);

	if (removing) {
		kfree(pdsc->viftype_status);
		pdsc->viftype_status = NULL;
	}

	pdsc_dev_uninit(pdsc);

	set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}

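/* Unmask the adminq interrupt so queue processing can begin. */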
int pdsc_start(struct pdsc *pdsc)
{
	pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
			   PDS_CORE_INTR_MASK_CLEAR);

	return 0;
}

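/* Quiesce the device by masking all interrupts that are in use. */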
void pdsc_stop(struct pdsc *pdsc)
{
	int i;

	if (!pdsc->intr_info)
		return;

	/* Mask interrupts that are in use */
	for (i = 0; i < pdsc->nintrs; i++)
		if (pdsc->intr_info[i].vector)
			pds_core_intr_mask(&pdsc->intr_ctrl[i],
					   PDS_CORE_INTR_MASK_SET);
}

static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
{
	/* The driver initializes the adminq_refcnt to 1 when the adminq is
	 * allocated and ready for use. Other users/requesters will increment
	 * the refcnt while in use. If the refcnt is down to 1 then the adminq
	 * is not in use and the refcnt can be cleared and the adminq freed.
	 * Before calling this function the driver sets PDSC_S_FW_DEAD, which
	 * causes any subsequent attempt to use the adminq and increment the
	 * refcnt to fail. This guarantees that this function will eventually
	 * exit.
	 */
	while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
		dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
				    __func__);
		cpu_relax();
	}
}

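/* Handle the firmware-down transition: mark the FW dead, wait for the
 * adminq to become unused, report through devlink health, notify
 * clients of the reset, then stop and tear down for recovery.  On a VF
 * only the state bit is set.
 */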
void pdsc_fw_down(struct pdsc *pdsc)
{
	union pds_core_notifyq_comp reset_event = {
		.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
		.reset.state = 0,
	};

	if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		dev_warn(pdsc->dev, "%s: already happening\n", __func__);
		return;
	}

	if (pdsc->pdev->is_virtfn)
		return;

	pdsc_adminq_wait_and_dec_once_unused(pdsc);

	/* Notify clients of fw_down */
	if (pdsc->fw_reporter)
		devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
	pdsc_notify(PDS_EVENT_RESET, &reset_event);

	pdsc_stop(pdsc);
	pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}

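/* Handle the firmware-up transition: redo the recovery setup and start,
 * update the devlink health reporter, and notify clients that the
 * device is back.  On a VF only the state bit is cleared.
 */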
void pdsc_fw_up(struct pdsc *pdsc)
{
	union pds_core_notifyq_comp reset_event = {
		.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
		.reset.state = 1,
	};
	int err;

	if (!test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		dev_err(pdsc->dev, "%s: fw not dead\n", __func__);
		return;
	}

	if (pdsc->pdev->is_virtfn) {
		clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
		return;
	}

	err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
	if (err)
		goto err_out;

	err = pdsc_start(pdsc);
	if (err)
		goto err_out;

	/* Notify clients of fw_up */
	pdsc->fw_recoveries++;
	if (pdsc->fw_reporter)
		devlink_health_reporter_state_update(pdsc->fw_reporter,
						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
	pdsc_notify(PDS_EVENT_RESET, &reset_event);

	return;

err_out:
	pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}

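/* If the firmware status register reads back as PDS_RC_BAD_PCI the PCI
 * connection is considered broken, so reset the function to recover.
 */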
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
	u8 fw_status;

	/* some sort of teardown already in progress */
	if (!pdsc->info_regs)
		return;

	fw_status = ioread8(&pdsc->info_regs->fw_status);

	/* is PCI broken? */
	if (fw_status != PDS_RC_BAD_PCI)
		return;

	pci_reset_function(pdsc->pdev);
}

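/* Health check run from the health_work work item: compare the
 * firmware status against the current state and trigger fw_up/fw_down
 * transitions as needed, then check for PCI trouble.
 */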
void pdsc_health_thread(struct work_struct *work)
{
	struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
	unsigned long mask;
	bool healthy;

	mutex_lock(&pdsc->config_lock);

	/* Don't do a check when in a transition state */
	mask = BIT_ULL(PDSC_S_INITING_DRIVER) |
	       BIT_ULL(PDSC_S_STOPPING_DRIVER);
	if (pdsc->state & mask)
		goto out_unlock;

	healthy = pdsc_is_fw_good(pdsc);
	dev_dbg(pdsc->dev, "%s: health %d fw_status %#02x fw_heartbeat %d\n",
		__func__, healthy, pdsc->fw_status, pdsc->last_hb);

	if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		if (healthy)
			pdsc_fw_up(pdsc);
	} else {
		if (!healthy)
			pdsc_fw_down(pdsc);
	}

	pdsc_check_pci_health(pdsc);

	pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;

out_unlock:
	mutex_unlock(&pdsc->config_lock);
}