// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that
 * are instantiated by the core.
 */
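
/*
 * Illustrative sketch of the resulting hierarchy (device names follow the
 * dev_set_name() patterns used below; the exact topology is platform
 * dependent):
 *
 *	/sys/bus/cxl/devices/root0		platform CXL root port
 *	/sys/bus/cxl/devices/port1		host bridge / switch port
 *	/sys/bus/cxl/devices/endpoint2		terminal memdev port
 *	/sys/bus/cxl/devices/decoder0.0		decoder 0 of port id 0
 */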

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(const struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (dev->type == CXL_DAX_REGION_TYPE())
		return CXL_DEVICE_DAX_REGION;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_ACCELERATOR:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_EXPANDER:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
	struct cxl_decoder *cxld = &cxlsd->cxld;
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxlsd->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		if (i + 1 < cxld->interleave_ways)
			next = cxlsd->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}

static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
	ssize_t offset;
	unsigned int seq;
	int rc;

	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		rc = emit_target_list(cxlsd, buf);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	enum cxl_decoder_mode mode;
	ssize_t rc;

	if (sysfs_streq(buf, "pmem"))
		mode = CXL_DECODER_PMEM;
	else if (sysfs_streq(buf, "ram"))
		mode = CXL_DECODER_RAM;
	else
		return -EINVAL;

	rc = cxl_dpa_set_mode(cxled, mode);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	u64 base = cxl_dpa_resource_start(cxled);

	return sysfs_emit(buf, "%#llx\n", base);
}
static DEVICE_ATTR_RO(dpa_resource);

static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	resource_size_t size = cxl_dpa_size(cxled);

	return sysfs_emit(buf, "%pa\n", &size);
}

static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	unsigned long long size;
	ssize_t rc;

	rc = kstrtoull(buf, 0, &size);
	if (rc)
		return rc;

	if (!IS_ALIGNED(size, SZ_256M))
		return -EINVAL;

	rc = cxl_dpa_free(cxled);
	if (rc)
		return rc;

	if (size == 0)
		return len;

	rc = cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(dpa_size);
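
/*
 * Example of driving the mode/dpa_size attributes from user space (a
 * sketch; "decoder3.0" is a hypothetical endpoint decoder name):
 *
 *	echo ram > /sys/bus/cxl/devices/decoder3.0/mode
 *	echo 0x10000000 > /sys/bus/cxl/devices/decoder3.0/dpa_size
 *
 * Sizes must be SZ_256M aligned; writing 0 frees the allocation.
 */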

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}

static DEVICE_ATTR_RO(interleave_granularity);

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}

static DEVICE_ATTR_RO(interleave_ways);

static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_interleave_ways.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(create_pmem_region)
	SET_CXL_REGION_ATTR(create_ram_region)
	SET_CXL_REGION_ATTR(delete_region)
	NULL,
};

static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static bool can_create_ram(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(delete_region) &&
	    !(can_create_pmem(cxlrd) || can_create_ram(cxlrd)))
		return 0;

	return a->mode;
}

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
	.is_visible = cxl_root_decoder_visible,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_mode.attr,
	&dev_attr_dpa_size.attr,
	&dev_attr_dpa_resource.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	ida_free(&port->decoder_ida, cxld->id);
	put_device(&port->dev);
}

static void cxl_endpoint_decoder_release(struct device *dev)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	__cxl_decoder_release(&cxled->cxld);
	kfree(cxled);
}

static void cxl_switch_decoder_release(struct device *dev)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

	__cxl_decoder_release(&cxlsd->cxld);
	kfree(cxlsd);
}

struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
			  "not a cxl_root_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);

static void cxl_root_decoder_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (atomic_read(&cxlrd->region_id) >= 0)
		memregion_free(atomic_read(&cxlrd->region_id));
	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
	kfree(cxlrd);
}

static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_endpoint_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_switch_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_root_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}
EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);

bool is_switch_decoder(struct device *dev)
{
	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev,
			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);

static void cxl_ep_release(struct cxl_ep *ep)
{
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
	if (!ep)
		return;
	xa_erase(&port->endpoints, (unsigned long) ep->ep);
	cxl_ep_release(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(const struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);

struct cxl_port *to_cxl_port(const struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent;
	struct device *lock_dev;

	if (is_cxl_root(port))
		parent = NULL;
	else
		parent = to_cxl_port(port->dev.parent);

	/*
	 * The CXL root port and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
	 */
	if (!parent)
		lock_dev = port->uport;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->dead = true;
	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static void cxl_unlink_parent_dport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "parent_dport");
}

static int devm_cxl_link_parent_dport(struct device *host,
				      struct cxl_port *port,
				      struct cxl_dport *parent_dport)
{
	int rc;

	if (!parent_dport)
		return 0;

	rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport->kobj,
			       "parent_dport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
}

static struct lock_class_key cxl_port_key;

static struct cxl_port *cxl_port_alloc(struct device *uport,
				       resource_size_t component_reg_phys,
				       struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	port->id = rc;
	port->uport = uport;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_dport) {
		struct cxl_port *parent_port = parent_dport->port;
		struct cxl_port *iter;

		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
		port->parent_dport = parent_dport;

		/*
		 * Walk to the host bridge, or the first ancestor that knows
		 * the host bridge.
		 */
		iter = port;
		while (!iter->host_bridge &&
		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);
		if (iter->host_bridge)
			port->host_bridge = iter->host_bridge;
		else if (parent_dport->rch)
			port->host_bridge = parent_dport->dport;
		else
			port->host_bridge = iter->uport;
		dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge));
	} else
		dev->parent = uport;

	port->component_reg_phys = component_reg_phys;
	ida_init(&port->decoder_ida);
	port->hdm_end = -1;
	port->commit_end = -1;
	xa_init(&port->dports);
	xa_init(&port->endpoints);
	xa_init(&port->regions);

	device_initialize(dev);
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;

err:
	kfree(port);
	return ERR_PTR(rc);
}

static struct cxl_port *__devm_cxl_add_port(struct device *host,
					    struct device *uport,
					    resource_size_t component_reg_phys,
					    struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	port = cxl_port_alloc(uport, component_reg_phys, parent_dport);
	if (IS_ERR(port))
		return port;

	dev = &port->dev;
	if (is_cxl_memdev(uport))
		rc = dev_set_name(dev, "endpoint%d", port->id);
	else if (parent_dport)
		rc = dev_set_name(dev, "port%d", port->id);
	else
		rc = dev_set_name(dev, "root%d", port->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_parent_dport(host, port, parent_dport);
	if (rc)
		return ERR_PTR(rc);

	return port;

err:
	put_device(dev);
	return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport)
{
	struct cxl_port *port, *parent_port;

	port = __devm_cxl_add_port(host, uport, component_reg_phys,
				   parent_dport);

	parent_port = parent_dport ? parent_dport->port : NULL;
	if (IS_ERR(port)) {
		/* do not dereference @port here, it is an error pointer */
		dev_dbg(uport, "Failed to add%s%s%s: %ld\n",
			parent_port ? " port to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " root port",
			PTR_ERR(port));
	} else {
		dev_dbg(uport, "%s added%s%s%s\n",
			dev_name(&port->dev),
			parent_port ? " to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " (root port)");
	}

	return port;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
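
/*
 * Usage sketch (hypothetical "host" platform device acting as both devm
 * host and uport, as a platform driver might do for the CXL root):
 *
 *	struct cxl_port *root_port;
 *
 *	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	if (IS_ERR(root_port))
 *		return PTR_ERR(root_port);
 */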

struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport)) {
		struct pci_dev *pdev = to_pci_dev(port->uport);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);

static void unregister_pci_bus(void *uport)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport, bus, GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);

static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}

struct cxl_port *find_cxl_root(struct cxl_port *port)
{
	struct cxl_port *iter = port;

	while (iter && !is_cxl_root(iter))
		iter = to_cxl_port(iter->dev.parent);

	if (!iter)
		return NULL;
	get_device(&iter->dev);
	return iter;
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);
	xa_for_each(&port->dports, index, dport)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
	struct cxl_dport *dup;
	int rc;

	device_lock_assert(&port->dev);
	dup = find_dport(port, new->port_id);
	if (dup) {
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			new->port_id, dev_name(new->dport),
			dev_name(dup->dport));
		return -EBUSY;
	}

	rc = xa_insert(&port->dports, (unsigned long)new->dport, new,
		       GFP_KERNEL);
	if (rc)
		return rc;

	port->nr_dports++;
	return 0;
}

/*
 * Since root-level CXL dports cannot be enumerated by PCI, they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	xa_erase(&port->dports, (unsigned long) dport->dport);
	put_device(dport->dport);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}

static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
		     int port_id, resource_size_t component_reg_phys,
		     resource_size_t rcrb)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport = dport_dev;
	dport->port_id = port_id;
	dport->component_reg_phys = component_reg_phys;
	dport->port = port;
	if (rcrb != CXL_RESOURCE_NONE)
		dport->rch = true;
	dport->rcrb = rcrb;

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	return dport;
}

/**
 * devm_cxl_add_dport - append VH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports) or the port itself (for switch
 * ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	struct cxl_dport *dport;

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     component_reg_phys, CXL_RESOURCE_NONE);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
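
/*
 * Usage sketch (hypothetical @bridge_dev and port id 0): a platform
 * driver appending a root-level dport:
 *
 *	struct cxl_dport *dport;
 *
 *	dport = devm_cxl_add_dport(root_port, bridge_dev, 0,
 *				   CXL_RESOURCE_NONE);
 *	if (IS_ERR(dport))
 *		return PTR_ERR(dport);
 */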

/**
 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 * @rcrb: mandatory location of a Root Complex Register Block
 *
 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
 */
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t component_reg_phys,
					 resource_size_t rcrb)
{
	struct cxl_dport *dport;

	if (rcrb == CXL_RESOURCE_NONE) {
		dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
		return ERR_PTR(-EINVAL);
	}

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     component_reg_phys, rcrb);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "RCH dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);

static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;
	int rc;

	device_lock(&port->dev);
	if (port->dead) {
		device_unlock(&port->dev);
		return -ENXIO;
	}
	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
		       GFP_KERNEL);
	device_unlock(&port->dev);

	return rc;
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}

struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/*
 * All users of grandparent() are using it to walk a PCIe-like switch port
 * hierarchy. A PCIe switch comprises a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports.
 * When bridges stack, the grandparent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = dev_get_drvdata(&cxlmd->dev);
	struct cxl_port *parent_port;
	struct device *parent;

	parent_port = cxl_mem_find_port(cxlmd, NULL);
	if (!parent_port)
		goto out;
	parent = &parent_port->dev;

	device_lock(parent);
	if (parent->driver && !endpoint->dead) {
		devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
		devm_release_action(parent, cxl_unlink_uport, endpoint);
		devm_release_action(parent, unregister_port, endpoint);
	}
	device_unlock(parent);
	put_device(parent);
out:
	put_device(&endpoint->dev);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *dev = &cxlmd->dev;

	get_device(&endpoint->dev);
	dev_set_drvdata(dev, endpoint);
	cxlmd->depth = endpoint->depth;
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
	devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}

static void reap_dports(struct cxl_port *port)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
}

struct detach_ctx {
	struct cxl_memdev *cxlmd;
	int depth;
};

static int port_has_memdev(struct device *dev, const void *data)
{
	const struct detach_ctx *ctx = data;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;

	port = to_cxl_port(dev);
	if (port->depth != ctx->depth)
		return 0;

	return !!cxl_ep_load(port, ctx->cxlmd);
}

static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;

	for (int i = cxlmd->depth - 1; i >= 1; i--) {
		struct cxl_port *port, *parent_port;
		struct detach_ctx ctx = {
			.cxlmd = cxlmd,
			.depth = i,
		};
		struct device *dev;
		struct cxl_ep *ep;
		bool died = false;

		dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
				      port_has_memdev);
		if (!dev)
			continue;
		port = to_cxl_port(dev);

		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port) && parent_port->dev.driver) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		put_device(&port->dev);
		device_unlock(&parent_port->dev);
	}
}

static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device; in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return map.resource;
}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_port *port, *parent_port = NULL;
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration; fail for now to
		 * be re-probed after the platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	parent_port = find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	device_lock(&parent_port->dev);
	if (!parent_port->dev.driver) {
		dev_warn(&cxlmd->dev,
			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
			 dev_name(&parent_port->dev), dev_name(uport_dev));
		port = ERR_PTR(-ENXIO);
		goto out;
	}

	port = find_cxl_port_at(parent_port, dport_dev, &dport);
	if (!port) {
		component_reg_phys = find_component_registers(uport_dev);
		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
					 component_reg_phys, parent_dport);
		/* retry find to pick up the new dport information */
		if (!IS_ERR(port))
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
	}
out:
	device_unlock(&parent_port->dev);

	if (IS_ERR(port))
		rc = PTR_ERR(port);
	else {
		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
			dev_name(&port->dev), dev_name(port->uport));
		rc = cxl_add_ep(dport, &cxlmd->dev);
		if (rc == -EBUSY) {
			/*
			 * "can't" happen, but this error code means
			 * something to the caller, so translate it.
			 */
			rc = -ENXIO;
		}
		put_device(&port->dev);
	}

	put_device(&parent_port->dev);
	return rc;
}

int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	/*
	 * Skip intermediate port enumeration in the RCH case; there
	 * are no ports in between a host bridge and an endpoint.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;
		struct cxl_port *port;

		if (!dport_dev)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		port = find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev), dev_name(port->uport));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EBUSY) {
				put_device(&port->dev);
				return rc;
			}

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev)) {
				put_device(&port->dev);
				continue;
			}

			put_device(&port->dev);
			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{
	return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);

static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i, rc = 0;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	write_seqlock(&cxlsd->target_lock);
	for (i = 0; i < cxlsd->nr_targets; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport) {
			rc = -ENXIO;
			break;
		}
		cxlsd->target[i] = dport;
	}
	write_sequnlock(&cxlsd->target_lock);

	return rc;
}

struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
{
	struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
	struct cxl_decoder *cxld = &cxlsd->cxld;
	int iw;

	iw = cxld->interleave_ways;
	if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets,
			  "misconfigured root decoder\n"))
		return NULL;

	return cxlrd->cxlsd.target[pos % iw];
}
EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL);
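
/*
 * Worked example (assumed values): with interleave_ways == 2 and
 * targets[] == { hb0, hb1 }, positions decode as 0->hb0, 1->hb1,
 * 2->hb0, 3->hb1, ..., i.e. target[pos % iw].
 */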

static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enables some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add().
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{
	struct device *dev;
	int rc;

	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
	if (rc < 0)
		return rc;

	/* need parent to stick around to release the id */
	get_device(&port->dev);
	cxld->id = rc;

	dev = &cxld->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_decoder_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;

	/* Pre initialize an "empty" decoder */
	cxld->interleave_ways = 1;
	cxld->interleave_granularity = PAGE_SIZE;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	return 0;
}

static int cxl_switch_decoder_init(struct cxl_port *port,
				   struct cxl_switch_decoder *cxlsd,
				   int nr_targets)
{
	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE)
		return -EINVAL;

	cxlsd->nr_targets = nr_targets;
	seqlock_init(&cxlsd->target_lock);
	return cxl_decoder_init(port, &cxlsd->cxld);
}

/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 * @calc_hb: which host bridge covers the n'th position by granularity
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets,
						cxl_calc_hb_fn calc_hb)
{
	struct cxl_root_decoder *cxlrd;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_root(port))
		return ERR_PTR(-EINVAL);

	cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets),
			GFP_KERNEL);
	if (!cxlrd)
		return ERR_PTR(-ENOMEM);

	cxlsd = &cxlrd->cxlsd;
	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlrd);
		return ERR_PTR(rc);
	}

	cxlrd->calc_hb = calc_hb;
	mutex_init(&cxlrd->range_lock);

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_root_type;
	/*
	 * cxl_root_decoder_release() special cases negative ids to
	 * detect memregion_alloc() failures.
	 */
	atomic_set(&cxlrd->region_id, -1);
	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0) {
		put_device(&cxld->dev);
		return ERR_PTR(rc);
	}

	atomic_set(&cxlrd->region_id, rc);
	return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_decoder *cxld;
	int rc;

	if (is_cxl_root(port) || is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL);
	if (!cxlsd)
		return ERR_PTR(-ENOMEM);

	rc = cxl_switch_decoder_init(port, cxlsd, nr_targets);
	if (rc) {
		kfree(cxlsd);
		return ERR_PTR(rc);
	}

	cxld = &cxlsd->cxld;
	cxld->dev.type = &cxl_decoder_switch_type;
	return cxlsd;
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);

/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{
	struct cxl_endpoint_decoder *cxled;
	struct cxl_decoder *cxld;
	int rc;

	if (!is_cxl_endpoint(port))
		return ERR_PTR(-EINVAL);

	cxled = kzalloc(sizeof(*cxled), GFP_KERNEL);
	if (!cxled)
		return ERR_PTR(-ENOMEM);

	cxled->pos = -1;
	cxld = &cxled->cxld;
	rc = cxl_decoder_init(port, cxld);
	if (rc) {
		kfree(cxled);
		return ERR_PTR(rc);
	}

	cxld->dev.type = &cxl_decoder_endpoint_type;
	return cxled;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);

/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a hostbridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    decoder to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	struct device *dev;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	if (cxld->interleave_ways < 1)
		return -EINVAL;

	dev = &cxld->dev;

	port = to_cxl_port(cxld->dev.parent);
	if (!is_endpoint_decoder(dev)) {
		struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

		rc = decoder_populate_targets(cxlsd, port, target_map);
		if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
			dev_err(&port->dev,
				"Failed to populate active decoder targets\n");
			return rc;
		}
	}

	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
	if (rc)
		return rc;

	return device_add(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port that
 *	    owns the decoder.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
	struct cxl_port *port;
	int rc;

	if (WARN_ON_ONCE(!cxld))
		return -EINVAL;

	if (WARN_ON_ONCE(IS_ERR(cxld)))
		return PTR_ERR(cxld);

	port = to_cxl_port(cxld->dev.parent);

	device_lock(&port->dev);
	rc = cxl_decoder_add_locked(cxld, target_map);
	device_unlock(&port->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
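
/*
 * Usage sketch (hypothetical @port with two dports whose ids are 0 and
 * 1): allocate and register a two-way switch decoder:
 *
 *	int target_map[2] = { 0, 1 };
 *	struct cxl_switch_decoder *cxlsd;
 *	int rc;
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, 2);
 *	if (IS_ERR(cxlsd))
 *		return PTR_ERR(cxlsd);
 *	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
 *	if (rc)
 *		put_device(&cxlsd->cxld.dev);
 */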

static void cxld_unregister(void *dev)
{
	struct cxl_endpoint_decoder *cxled;

	if (is_endpoint_decoder(dev)) {
		cxled = to_cxl_endpoint_decoder(dev);
		cxl_decoder_kill_region(cxled);
	}

	device_unregister(dev);
}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{
	if (!cxl_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->name) {
		pr_debug("%s ->name must be specified\n", modname);
		return -EINVAL;
	}

	if (!cxl_drv->id) {
		pr_debug("%s ->id must be specified\n", modname);
		return -EINVAL;
	}

	cxl_drv->drv.bus = &cxl_bus_type;
	cxl_drv->drv.owner = owner;
	cxl_drv->drv.mod_name = modname;
	cxl_drv->drv.name = cxl_drv->name;

	return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
	driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
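
/*
 * Registration sketch (hypothetical driver): a minimal cxl_driver that
 * binds to CXL_DEVICE_PORT devices, typically registered via the
 * module_cxl_driver() helper:
 *
 *	static int my_port_probe(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cxl_driver my_port_driver = {
 *		.name = "my_port",
 *		.probe = my_port_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(my_port_driver);
 */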

static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
			      cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
	return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
	int rc;

	rc = to_cxl_drv(dev->driver)->probe(dev);
	dev_dbg(dev, "probe: %d\n", rc);
	return rc;
}

static void cxl_bus_remove(struct device *dev)
{
	struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

	if (cxl_drv->remove)
		cxl_drv->remove(dev);
}

static struct workqueue_struct *cxl_bus_wq;

static void cxl_bus_rescan_queue(struct work_struct *w)
{
	int rc = bus_rescan_devices(&cxl_bus_type);

	pr_debug("CXL bus rescan result: %d\n", rc);
}

void cxl_bus_rescan(void)
{
	static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue);

	queue_work(cxl_bus_wq, &rescan_work);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);

void cxl_bus_drain(void)
{
	drain_workqueue(cxl_bus_wq);
}
EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
	return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (sysfs_streq(buf, "1")) {
		flush_workqueue(cxl_bus_wq);
		return count;
	}

	return -EINVAL;
}

static BUS_ATTR_WO(flush);
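
/*
 * Example (user tooling sketch): wait for outstanding port teardown
 * work to finish:
 *
 *	echo 1 > /sys/bus/cxl/flush
 */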

static struct attribute *cxl_bus_attributes[] = {
	&bus_attr_flush.attr,
	NULL,
};

static struct attribute_group cxl_bus_attribute_group = {
	.attrs = cxl_bus_attributes,
};

static const struct attribute_group *cxl_bus_attribute_groups[] = {
	&cxl_bus_attribute_group,
	NULL,
};

struct bus_type cxl_bus_type = {
	.name = "cxl",
	.uevent = cxl_bus_uevent,
	.match = cxl_bus_match,
	.probe = cxl_bus_probe,
	.remove = cxl_bus_remove,
	.bus_groups = cxl_bus_attribute_groups,
};
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{
	return debugfs_create_dir(dir, cxl_debugfs);
}
EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);

static __init int cxl_core_init(void)
{
	int rc;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);

	cxl_mbox_init();

	rc = cxl_memdev_init();
	if (rc)
		return rc;

	cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0);
	if (!cxl_bus_wq) {
		rc = -ENOMEM;
		goto err_wq;
	}

	rc = bus_register(&cxl_bus_type);
	if (rc)
		goto err_bus;

	rc = cxl_region_init();
	if (rc)
		goto err_region;

	return 0;

err_region:
	bus_unregister(&cxl_bus_type);
err_bus:
	destroy_workqueue(cxl_bus_wq);
err_wq:
	cxl_memdev_exit();
	return rc;
}

static void cxl_core_exit(void)
{
	cxl_region_exit();
	bus_unregister(&cxl_bus_type);
	destroy_workqueue(cxl_bus_wq);
	cxl_memdev_exit();
	debugfs_remove_recursive(cxl_debugfs);
}

subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");