drivers/misc/cxl/sysfs.c
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/pci_regs.h>

#include "cxl.h"

#define to_afu_chardev_m(d) dev_get_drvdata(d)

/*********  Adapter attributes  **********************************************/

static ssize_t caia_version_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
                         adapter->caia_minor);
}

static ssize_t psl_revision_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
}

static ssize_t base_image_show(struct device *device,
                               struct device_attribute *attr,
                               char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
}

static ssize_t image_loaded_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        if (adapter->user_image_loaded)
                return scnprintf(buf, PAGE_SIZE, "user\n");
        return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t psl_timebase_synced_show(struct device *device,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);
        u64 psl_tb, delta;

        /* Recompute the status only in native mode */
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                psl_tb = adapter->native->sl_ops->timebase_read(adapter);
                delta = abs(mftb() - psl_tb);

                /* CORE TB and PSL TB difference <= 16usecs ? */
                adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
                pr_devel("PSL timebase %s - delta: 0x%016llx\n",
                         (tb_to_ns(delta) < 16000) ? "synchronized" :
                         "not synchronized", tb_to_ns(delta));
        }
        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}

static ssize_t tunneled_ops_supported_show(struct device *device,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
}

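/*
 * Writing 1 resets the adapter only if no contexts are attached; writing -1
 * performs a forced reset regardless of attached contexts.
 */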
static ssize_t reset_adapter_store(struct device *device,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;
        int val;

        rc = sscanf(buf, "%i", &val);
        if ((rc != 1) || (val != 1 && val != -1))
                return -EINVAL;

        /*
         * Try to take the context-mapping lock, which is only possible when
         * no contexts are attached to the adapter. Once taken, it also
         * prevents any new context from being activated.
         */
        if (val == 1) {
                rc = cxl_adapter_context_lock(adapter);
                if (rc)
                        goto out;

                rc = cxl_ops->adapter_reset(adapter);
                /* If the reset failed, release the context lock */
                if (rc)
                        cxl_adapter_context_unlock(adapter);

        } else if (val == -1) {
                /* Perform a forced adapter reset */
                rc = cxl_ops->adapter_reset(adapter);
        }

out:
        return rc ? rc : count;
}

static ssize_t load_image_on_perst_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        if (!adapter->perst_loads_image)
                return scnprintf(buf, PAGE_SIZE, "none\n");

        if (adapter->perst_select_user)
                return scnprintf(buf, PAGE_SIZE, "user\n");
        return scnprintf(buf, PAGE_SIZE, "factory\n");
}

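/*
 * Accepts "none", "user" or "factory" and pushes the selection to the card
 * via cxl_update_image_control().
 */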
static ssize_t load_image_on_perst_store(struct device *device,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;

        if (!strncmp(buf, "none", 4))
                adapter->perst_loads_image = false;
        else if (!strncmp(buf, "user", 4)) {
                adapter->perst_select_user = true;
                adapter->perst_loads_image = true;
        } else if (!strncmp(buf, "factory", 7)) {
                adapter->perst_select_user = false;
                adapter->perst_loads_image = true;
        } else
                return -EINVAL;

        if ((rc = cxl_update_image_control(adapter)))
                return rc;

        return count;
}

static ssize_t perst_reloads_same_image_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
}

static ssize_t perst_reloads_same_image_store(struct device *device,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;
        int val;

        rc = sscanf(buf, "%i", &val);
        if ((rc != 1) || !(val == 1 || val == 0))
                return -EINVAL;

        adapter->perst_same_image = (val == 1 ? true : false);
        return count;
}

static struct device_attribute adapter_attrs[] = {
        __ATTR_RO(caia_version),
        __ATTR_RO(psl_revision),
        __ATTR_RO(base_image),
        __ATTR_RO(image_loaded),
        __ATTR_RO(psl_timebase_synced),
        __ATTR_RO(tunneled_ops_supported),
        __ATTR_RW(load_image_on_perst),
        __ATTR_RW(perst_reloads_same_image),
        __ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};


/*********  AFU master specific attributes  **********************************/

static ssize_t mmio_size_show_master(struct device *device,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t pp_mmio_off_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}

static ssize_t pp_mmio_len_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
}

static struct device_attribute afu_master_attrs[] = {
        __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
        __ATTR_RO(pp_mmio_off),
        __ATTR_RO(pp_mmio_len),
};


/*********  AFU attributes  **************************************************/

static ssize_t mmio_size_show(struct device *device,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        if (afu->pp_size)
                return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t reset_store_afu(struct device *device,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        int rc;

        /* Not safe to reset if it is currently in use */
        mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr)) {
                rc = -EBUSY;
                goto err;
        }

        if ((rc = cxl_ops->afu_reset(afu)))
                goto err;

        rc = count;
err:
        mutex_unlock(&afu->contexts_lock);
        return rc;
}

static ssize_t irqs_min_show(struct device *device,
                             struct device_attribute *attr,
                             char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
}

static ssize_t irqs_max_show(struct device *device,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
}

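/*
 * irqs_max must be at least the per-process minimum (pp_irqs) and no more
 * than the adapter limit: user_irqs in native mode, or the per-AFU limit
 * set by pHyp in guest mode.
 */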
static ssize_t irqs_max_store(struct device *device,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        ssize_t ret;
        int irqs_max;

        ret = sscanf(buf, "%i", &irqs_max);
        if (ret != 1)
                return -EINVAL;

        if (irqs_max < afu->pp_irqs)
                return -EINVAL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (irqs_max > afu->adapter->user_irqs)
                        return -EINVAL;
        } else {
                /* pHyp sets a per-AFU limit */
                if (irqs_max > afu->guest->max_ints)
                        return -EINVAL;
        }

        afu->irqs_max = irqs_max;
        return count;
}

static ssize_t modes_supported_show(struct device *device,
                                    struct device_attribute *attr, char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        char *p = buf, *end = buf + PAGE_SIZE;

        if (afu->modes_supported & CXL_MODE_DEDICATED)
                p += scnprintf(p, end - p, "dedicated_process\n");
        if (afu->modes_supported & CXL_MODE_DIRECTED)
                p += scnprintf(p, end - p, "afu_directed\n");
        return (p - buf);
}

static ssize_t prefault_mode_show(struct device *device,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        switch (afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
        case CXL_PREFAULT_ALL:
                return scnprintf(buf, PAGE_SIZE, "all\n");
        default:
                return scnprintf(buf, PAGE_SIZE, "none\n");
        }
}

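/*
 * "work_element_descriptor" and "all" are only accepted when the radix MMU
 * is disabled; "none" is always valid.
 */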
static ssize_t prefault_mode_store(struct device *device,
                          struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        enum prefault_modes mode = -1;

        if (!strncmp(buf, "none", 4))
                mode = CXL_PREFAULT_NONE;
        else {
                if (!radix_enabled()) {
                        /* only allowed when not in radix mode */
                        if (!strncmp(buf, "work_element_descriptor", 23))
                                mode = CXL_PREFAULT_WED;
                        if (!strncmp(buf, "all", 3))
                                mode = CXL_PREFAULT_ALL;
                } else {
                        dev_err(device, "Cannot prefault with radix enabled\n");
                }
        }

        if (mode == -1)
                return -EINVAL;

        afu->prefault_mode = mode;
        return count;
}

static ssize_t mode_show(struct device *device,
                         struct device_attribute *attr,
                         char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        if (afu->current_mode == CXL_MODE_DEDICATED)
                return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
        if (afu->current_mode == CXL_MODE_DIRECTED)
                return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
        return scnprintf(buf, PAGE_SIZE, "none\n");
}

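/*
 * Switch the AFU between "dedicated_process", "afu_directed" and "none".
 * Only allowed while no contexts are attached: the old mode is deactivated
 * before the new one is activated.
 */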
static ssize_t mode_store(struct device *device, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        int old_mode, mode = -1;
        int rc = -EBUSY;

        /* can't change this if we have a user */
        mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr))
                goto err;

        if (!strncmp(buf, "dedicated_process", 17))
                mode = CXL_MODE_DEDICATED;
        if (!strncmp(buf, "afu_directed", 12))
                mode = CXL_MODE_DIRECTED;
        if (!strncmp(buf, "none", 4))
                mode = 0;

        if (mode == -1) {
                rc = -EINVAL;
                goto err;
        }

        /*
         * afu_deactivate_mode needs to be done outside the lock, so prevent
         * other contexts from coming in before we are ready:
         */
        old_mode = afu->current_mode;
        afu->current_mode = 0;
        afu->num_procs = 0;

        mutex_unlock(&afu->contexts_lock);

        if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
                return rc;
        if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
                return rc;

        return count;
err:
        mutex_unlock(&afu->contexts_lock);
        return rc;
}

static ssize_t api_version_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
}

static ssize_t api_version_compatible_show(struct device *device,
                                           struct device_attribute *attr,
                                           char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
}

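/* Expose the AFU error buffer through the backend's afu_read_err_buffer op */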
static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
                               struct bin_attribute *bin_attr, char *buf,
                               loff_t off, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));

        return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}

static struct device_attribute afu_attrs[] = {
        __ATTR_RO(mmio_size),
        __ATTR_RO(irqs_min),
        __ATTR_RW(irqs_max),
        __ATTR_RO(modes_supported),
        __ATTR_RW(mode),
        __ATTR_RW(prefault_mode),
        __ATTR_RO(api_version),
        __ATTR_RO(api_version_compatible),
        __ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};

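/*
 * Create the adapter attributes that the backend supports, removing any
 * already-created files if one of them fails.
 */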
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
        struct device_attribute *dev_attr;
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS)) {
                        if ((rc = device_create_file(&adapter->dev, dev_attr)))
                                goto err;
                }
        }
        return 0;
err:
        for (i--; i >= 0; i--) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS))
                        device_remove_file(&adapter->dev, dev_attr);
        }
        return rc;
}

void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
        struct device_attribute *dev_attr;
        int i;

        for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS))
                        device_remove_file(&adapter->dev, dev_attr);
        }
}

struct afu_config_record {
        struct kobject kobj;
        struct bin_attribute config_attr;
        struct list_head list;
        int cr;
        u16 device;
        u16 vendor;
        u32 class;
};

#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)

static ssize_t vendor_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
}

static ssize_t device_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
}

static ssize_t class_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
}

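/*
 * Read the raw config record in aligned 64-bit chunks; words that fail to
 * read are filled with 0xff bytes.
 */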
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
                               struct bin_attribute *bin_attr, char *buf,
                               loff_t off, size_t count)
{
        struct afu_config_record *cr = to_cr(kobj);
        struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));

        u64 i, j, val, rc;

        for (i = 0; i < count;) {
                rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
                if (rc)
                        val = ~0ULL;
                for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
                        buf[i] = (val >> (j * 8)) & 0xff;
        }

        return count;
}

static struct kobj_attribute vendor_attribute =
        __ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
        __ATTR_RO(device);
static struct kobj_attribute class_attribute =
        __ATTR_RO(class);

static struct attribute *afu_cr_attrs[] = {
        &vendor_attribute.attr,
        &device_attribute.attr,
        &class_attribute.attr,
        NULL,
};

static void release_afu_config_record(struct kobject *kobj)
{
        struct afu_config_record *cr = to_cr(kobj);

        kfree(cr);
}

static struct kobj_type afu_config_record_type = {
        .sysfs_ops = &kobj_sysfs_ops,
        .release = release_afu_config_record,
        .default_attrs = afu_cr_attrs,
};

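/*
 * Allocate one afu_config_record, read its vendor/device/class IDs and
 * register a cr%i kobject under the AFU with a root-only "config" binary
 * attribute exposing the raw record.
 */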
static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
{
        struct afu_config_record *cr;
        int rc;

        cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
        if (!cr)
                return ERR_PTR(-ENOMEM);

        cr->cr = cr_idx;

        rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
        if (rc)
                goto err;
        rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
        if (rc)
                goto err;
        rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
        if (rc)
                goto err;
        cr->class >>= 8;

        /*
         * Export the raw, PCIe-like AFU config record. For now this is
         * readable only by root - we can expand that later to allow non-root
         * reads and maybe even writes, provided we have a good use-case. Once
         * we support exposing AFUs through a virtual PHB they will get that
         * for free from Linux's PCI infrastructure, but until then it's not
         * clear that we need it for anything, since the main use case is just
         * identifying AFUs, which can be done via the vendor, device and
         * class attributes.
         */
        sysfs_bin_attr_init(&cr->config_attr);
        cr->config_attr.attr.name = "config";
        cr->config_attr.attr.mode = S_IRUSR;
        cr->config_attr.size = afu->crs_len;
        cr->config_attr.read = afu_read_config;

        rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
                                  &afu->dev.kobj, "cr%i", cr->cr);
        if (rc)
                goto err;

        rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
        if (rc)
                goto err1;

        rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
        if (rc)
                goto err2;

        return cr;
err2:
        sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
err1:
        kobject_put(&cr->kobj);
        return ERR_PTR(rc);
err:
        kfree(cr);
        return ERR_PTR(rc);
}

void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        struct afu_config_record *cr, *tmp;
        int i;

        /* remove the err buffer bin attribute */
        if (afu->eb_len)
                device_remove_bin_file(&afu->dev, &afu->attr_eb);

        for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS))
                        device_remove_file(&afu->dev, &afu_attrs[i]);
        }

        list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
                sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
                kobject_put(&cr->kobj);
        }
}

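/*
 * Create the backend-supported AFU attributes, the optional afu_err_buff
 * binary file and one cr%i directory per AFU config record.
 */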
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        struct afu_config_record *cr;
        int i, rc;

        INIT_LIST_HEAD(&afu->crs);

        for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS)) {
                        if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
                                goto err;
                }
        }

        /* conditionally create the binary file for the error info buffer */
        if (afu->eb_len) {
                sysfs_attr_init(&afu->attr_eb.attr);

                afu->attr_eb.attr.name = "afu_err_buff";
                afu->attr_eb.attr.mode = S_IRUGO;
                afu->attr_eb.size = afu->eb_len;
                afu->attr_eb.read = afu_eb_read;

                rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
                if (rc) {
                        dev_err(&afu->dev,
                                "Unable to create eb attr for the afu. Err(%d)\n",
                                rc);
                        goto err;
                }
        }

        for (i = 0; i < afu->crs_num; i++) {
                cr = cxl_sysfs_afu_new_cr(afu, i);
                if (IS_ERR(cr)) {
                        rc = PTR_ERR(cr);
                        goto err1;
                }
                list_add(&cr->list, &afu->crs);
        }

        return 0;

err1:
        cxl_sysfs_afu_remove(afu);
        return rc;
err:
        /* reset eb_len as we haven't created the bin attr */
        afu->eb_len = 0;

        for (i--; i >= 0; i--) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS))
                        device_remove_file(&afu->dev, &afu_attrs[i]);
        }
        return rc;
}

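/*
 * The master character device has its own small set of attributes, created
 * against afu->chardev_m rather than the AFU device itself.
 */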
int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS)) {
                        if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
                                goto err;
                }
        }

        return 0;

err:
        for (i--; i >= 0; i--) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS))
                        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
        }
        return rc;
}

void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        int i;

        for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS))
                        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
        }
}