// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/units.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
#include "trace.h"
/**
 * DOC: cxl core pci
 *
 * Compute Express Link protocols are layered on top of PCIe. CXL core provides
 * a set of helpers for CXL interactions which occur via PCIe.
 */
static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
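/*
 * Illustrative note (not from the original source): because the parameter is
 * registered with 0644 permissions it can also be adjusted at runtime via
 * sysfs, e.g.:
 *
 *	echo 30 > /sys/module/cxl_core/parameters/media_ready_timeout
 *
 * The "cxl_core" module name is an assumption about how this file is linked.
 */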
struct cxl_walk_context {
	struct pci_bus *bus;
	struct cxl_port *port;
	int type;
	int error;
	int count;
};
static int match_add_dports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct cxl_register_map map;
	struct cxl_dport *dport;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != ctx->bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != ctx->type)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return 0;

	rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_dbg(&port->dev, "failed to find component registers\n");

	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource);
	if (IS_ERR(dport)) {
		ctx->error = PTR_ERR(dport);
		return PTR_ERR(dport);
	}
	ctx->count++;
	return 0;
}
/**
 * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
 * @port: cxl_port whose ->uport_dev is the upstream of dports to be enumerated
 *
 * Returns a positive number of dports enumerated or a negative error
 * code.
 */
int devm_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct pci_bus *bus = cxl_port_to_pci_bus(port);
	struct cxl_walk_context ctx;
	int type;

	if (!bus)
		return -ENXIO;

	if (pci_is_root_bus(bus))
		type = PCI_EXP_TYPE_ROOT_PORT;
	else
		type = PCI_EXP_TYPE_DOWNSTREAM;

	ctx = (struct cxl_walk_context) {
		.port = port,
		.bus = bus,
		.type = type,
	};
	pci_walk_bus(bus, match_add_dports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	return ctx.count;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
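/*
 * Illustrative caller sketch (an assumption, not part of this file): a port
 * driver probe path would treat the positive return as the dport count:
 *
 *	rc = devm_cxl_port_enumerate_dports(port);
 *	if (rc < 0)
 *		return rc;
 *	dev_dbg(&port->dev, "enumerated %d dports\n", rc);
 */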
static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool valid = false;
	int rc, i;
	u32 temp;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	/* Check MEM INFO VALID bit first, give up after 1s */
	i = 1;
	do {
		rc = pci_read_config_dword(pdev,
					   d + CXL_DVSEC_RANGE_SIZE_LOW(id),
					   &temp);
		if (rc)
			return rc;

		valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp);
		if (valid)
			break;
		msleep(1000);
	} while (i--);

	if (!valid) {
		dev_err(&pdev->dev,
			"Timeout awaiting memory range %d valid after 1s.\n",
			id);
		return -ETIMEDOUT;
	}

	return 0;
}
static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool active = false;
	int rc, i;
	u32 temp;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	/* Check MEM ACTIVE bit, up to 60s timeout by default */
	for (i = media_ready_timeout; i; i--) {
		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp);
		if (rc)
			return rc;

		active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
		if (active)
			break;
		msleep(1000);
	}

	if (!active) {
		dev_err(&pdev->dev,
			"timeout awaiting memory active after %d seconds\n",
			media_ready_timeout);
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Wait up to @media_ready_timeout for the device to report memory
 * active.
 */
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	int rc, i, hdm_count;
	u64 md_status;
	u16 cap;

	rc = pci_read_config_word(pdev,
				  d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_valid(cxlds, i);
		if (rc)
			return rc;
	}

	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_active(cxlds, i);
		if (rc)
			return rc;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
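/*
 * Illustrative caller sketch (an assumption, not part of this file): a PCI
 * driver probe path would gate CXL.mem setup on media readiness, e.g.:
 *
 *	rc = cxl_await_media_ready(cxlds);
 *	if (rc)
 *		dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
 *	else
 *		cxlds->media_ready = true;
 */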
static int wait_for_valid(struct pci_dev *pdev, int d)
{
	u32 val;
	int rc;

	/*
	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
	 * and Size Low registers are valid. Must be set within 1 second of
	 * deassertion of reset to CXL device. Likely it is already set by the
	 * time this runs, but otherwise give a 1.5 second timeout in case of
	 * clock skew.
	 */
	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	msleep(1500);

	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	return -ETIMEDOUT;
}
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	u16 ctrl;
	int rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc < 0)
		return rc;

	if ((ctrl & CXL_DVSEC_MEM_ENABLE) == val)
		return 1;
	ctrl &= ~CXL_DVSEC_MEM_ENABLE;
	ctrl |= val;

	rc = pci_write_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, ctrl);
	if (rc < 0)
		return rc;

	return 0;
}
static void clear_mem_enable(void *cxlds)
{
	cxl_set_mem_enable(cxlds, 0);
}
static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
{
	int rc;

	rc = cxl_set_mem_enable(cxlds, CXL_DVSEC_MEM_ENABLE);
	if (rc < 0)
		return rc;
	/* rc == 1: mem_enable was already set, nothing to undo at unbind */
	if (rc > 0)
		return 0;
	return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}
/* require dvsec ranges to be covered by a locked platform window */
static int dvsec_range_allowed(struct device *dev, void *arg)
{
	struct range *dev_range = arg;
	struct cxl_decoder *cxld;

	if (!is_root_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	if (!(cxld->flags & CXL_DECODER_F_RAM))
		return 0;

	return range_contains(&cxld->hpa_range, dev_range);
}
static void disable_hdm(void *_cxlhdm)
{
	u32 global_ctrl;
	struct cxl_hdm *cxlhdm = _cxlhdm;
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}
static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	u32 global_ctrl;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	/* the paired disable_hdm() runs when @host's driver unbinds */
	return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
int cxl_dvsec_rr_decode(struct device *dev, int d,
			struct cxl_endpoint_dvsec_info *info)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int hdm_count, rc, i, ranges = 0;
	u16 cap, ctrl;

	if (!d) {
		dev_dbg(dev, "No DVSEC Capability\n");
		return -ENXIO;
	}

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc)
		return rc;

	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
		dev_dbg(dev, "Not MEM Capable\n");
		return -ENXIO;
	}

	/*
	 * It is not allowed by spec for MEM.capable to be set and have 0 legacy
	 * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
	 * driver is for a spec defined class code which must be CXL.mem
	 * capable, there is no point in continuing to enable CXL.mem.
	 */
	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	rc = wait_for_valid(pdev, d);
	if (rc) {
		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
		return rc;
	}

	/*
	 * The current DVSEC values are moot if the memory capability is
	 * disabled, and they will remain moot after the HDM Decoder
	 * capability is enabled.
	 */
	info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
	if (!info->mem_enabled)
		return 0;

	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
		if (!size) {
			info->dvsec_range[i] = (struct range) {
				.start = 0,
				.end = CXL_RESOURCE_NONE,
			};
			continue;
		}

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;

		info->dvsec_range[i] = (struct range) {
			.start = base,
			.end = base + size - 1
		};

		ranges++;
	}

	info->ranges = ranges;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL);
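/*
 * Note: the @info decoded above (info->mem_enabled, info->dvsec_range[],
 * info->ranges) is consumed by cxl_hdm_decode_init() below to choose between
 * relying on the DVSEC ranges and enabling the HDM Decoder Capability.
 */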
/**
 * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
 * @cxlds: Device state
 * @cxlhdm: Mapped HDM decoder Capability
 * @info: Cached DVSEC range registers info
 *
 * Try to enable the endpoint's HDM Decoder Capability
 */
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
			struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	struct device *dev = cxlds->dev;
	struct cxl_port *root;
	int i, rc, allowed;
	u32 global_ctrl = 0;

	if (hdm)
		global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	/*
	 * If the HDM Decoder Capability is already enabled then assume
	 * that some other agent like platform firmware set it up.
	 */
	if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
		return devm_cxl_enable_mem(&port->dev, cxlds);
	else if (!hdm)
		return -ENODEV;

	root = to_cxl_port(port->dev.parent);
	while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
		root = to_cxl_port(root->dev.parent);
	if (!is_cxl_root(root)) {
		dev_err(dev, "Failed to acquire root port for HDM enable\n");
		return -ENODEV;
	}

	for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
		struct device *cxld_dev;

		cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
					     dvsec_range_allowed);
		if (!cxld_dev) {
			dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
			continue;
		}
		dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
		put_device(cxld_dev);
		allowed++;
	}

	if (!allowed && info->mem_enabled) {
		dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
		return -ENXIO;
	}

	/*
	 * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
	 * [High,Low] when HDM operation is enabled the range register values
	 * are ignored by the device, but the spec also recommends matching the
	 * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
	 * are expected even though Linux does not require or maintain that
	 * match. If at least one DVSEC range is enabled and allowed, skip HDM
	 * Decoder Capability Enable.
	 */
	if (info->mem_enabled)
		return 0;

	rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
	if (rc)
		return rc;

	return devm_cxl_enable_mem(&port->dev, cxlds);
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
#define CXL_DOE_TABLE_ACCESS_REQ_CODE		0x000000ff
#define   CXL_DOE_TABLE_ACCESS_REQ_CODE_READ	0
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE		0x0000ff00
#define   CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA	0
#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE	0xffff0000
#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY		0xffff
#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2
#define CDAT_DOE_REQ(entry_handle) cpu_to_le32				\
	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\
		    CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
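/*
 * Worked example (illustrative): the request is a single little-endian DW
 * with the entry handle in bits [31:16] and read/CDAT codes of 0 in the
 * lower bits, so CDAT_DOE_REQ(0) encodes 0x00000000 and CDAT_DOE_REQ(3)
 * encodes 0x00030000.
 */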
static int cxl_cdat_get_length(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       size_t *length)
{
	__le32 request = CDAT_DOE_REQ(0);
	__le32 response[2];
	int rc;

	rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
		     CXL_DOE_PROTOCOL_TABLE_ACCESS,
		     &request, sizeof(request),
		     &response, sizeof(response));
	if (rc < 0) {
		dev_err(dev, "DOE failed: %d", rc);
		return rc;
	}
	if (rc < sizeof(response))
		return -EIO;

	*length = le32_to_cpu(response[1]);
	dev_dbg(dev, "CDAT length %zu\n", *length);

	return 0;
}
static int cxl_cdat_read_table(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       void *cdat_table, size_t *cdat_length)
{
	size_t length = *cdat_length + sizeof(__le32);
	__le32 *data = cdat_table;
	int entry_handle = 0;
	__le32 saved_dw = 0;

	do {
		__le32 request = CDAT_DOE_REQ(entry_handle);
		struct cdat_entry_header *entry;
		size_t entry_dw;
		int rc;

		rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
			     CXL_DOE_PROTOCOL_TABLE_ACCESS,
			     &request, sizeof(request),
			     data, length);
		if (rc < 0) {
			dev_err(dev, "DOE failed: %d", rc);
			return rc;
		}

		/* 1 DW Table Access Response Header + CDAT entry */
		entry = (struct cdat_entry_header *)(data + 1);
		if ((entry_handle == 0 &&
		     rc != sizeof(__le32) + sizeof(struct cdat_header)) ||
		    (entry_handle > 0 &&
		     (rc < sizeof(__le32) + sizeof(*entry) ||
		      rc != sizeof(__le32) + le16_to_cpu(entry->length))))
			return -EIO;

		/* Get the CXL table access header entry handle */
		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
					 le32_to_cpu(data[0]));
		entry_dw = rc / sizeof(__le32);
		/* Skip Header */
		entry_dw -= 1;
		/*
		 * Table Access Response Header overwrote the last DW of
		 * previous entry, so restore that DW
		 */
		*data = saved_dw;
		length -= entry_dw * sizeof(__le32);
		data += entry_dw;
		saved_dw = *data;
	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);

	/* Length in CDAT header may exceed concatenation of CDAT entries */
	*cdat_length -= length - sizeof(__le32);

	return 0;
}
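/*
 * Buffer layout sketch (illustrative): each response is written at @data and
 * leads with a 1-DW Table Access Response Header, so response N's header
 * clobbers the last DW of entry N-1; that DW is saved in @saved_dw and
 * restored on the next iteration:
 *
 *	| hdr | entry 0 ...X| hdr | entry 1 ...
 *	^data, iter 0       ^data, iter 1 (header overwrote X)
 */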
static unsigned char cdat_checksum(void *buf, size_t size)
{
	unsigned char sum, *data = buf;
	size_t i;

	for (sum = 0, i = 0; i < size; i++)
		sum += data[i];
	return sum;
}
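/*
 * The CDAT header carries a checksum byte chosen so that all bytes of the
 * table sum to zero modulo 256, so any non-zero return indicates corruption.
 */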
/**
 * read_cdat_data - Read the CDAT data on this port
 * @port: Port to read data from
 *
 * This call will sleep waiting for responses from the DOE mailbox.
 */
void read_cdat_data(struct cxl_port *port)
{
	struct device *uport = port->uport_dev;
	struct device *dev = &port->dev;
	struct pci_doe_mb *cdat_doe;
	struct pci_dev *pdev = NULL;
	struct cxl_memdev *cxlmd;
	size_t cdat_length;
	void *cdat_table, *cdat_buf;
	int rc;

	if (is_cxl_memdev(uport)) {
		struct device *host;

		cxlmd = to_cxl_memdev(uport);
		host = cxlmd->dev.parent;
		if (dev_is_pci(host))
			pdev = to_pci_dev(host);
	} else if (dev_is_pci(uport)) {
		pdev = to_pci_dev(uport);
	}

	if (!pdev)
		return;

	cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
					CXL_DOE_PROTOCOL_TABLE_ACCESS);
	if (!cdat_doe) {
		dev_dbg(dev, "No CDAT mailbox\n");
		return;
	}

	port->cdat_available = true;

	if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
		dev_dbg(dev, "No CDAT length\n");
		return;
	}

	cdat_buf = devm_kzalloc(dev, cdat_length + sizeof(__le32), GFP_KERNEL);
	if (!cdat_buf)
		return;

	rc = cxl_cdat_read_table(dev, cdat_doe, cdat_buf, &cdat_length);
	if (rc)
		goto err;

	cdat_table = cdat_buf + sizeof(__le32);
	if (cdat_checksum(cdat_table, cdat_length))
		goto err;

	port->cdat.table = cdat_table;
	port->cdat.length = cdat_length;
	return;

err:
	/* Don't leave table data allocated on error */
	devm_kfree(dev, cdat_buf);
	dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
				 void __iomem *ras_base)
{
	void __iomem *addr;
	u32 status;

	if (!ras_base)
		return;

	addr = ras_base + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
		writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
		trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
	}
}

static void cxl_handle_endpoint_cor_ras(struct cxl_dev_state *cxlds)
{
	return __cxl_handle_cor_ras(cxlds, cxlds->regs.ras);
}
/* CXL spec rev3.0 8.2.4.16.1 */
static void header_log_copy(void __iomem *ras_base, u32 *log)
{
	void __iomem *addr;
	u32 *log_addr;
	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);

	addr = ras_base + CXL_RAS_HEADER_LOG_OFFSET;
	log_addr = log;

	for (i = 0; i < log_u32_size; i++) {
		*log_addr = readl(addr);
		log_addr++;
		addr += sizeof(u32);
	}
}
/*
 * Log the state of the RAS status registers and prepare them to log the
 * next error status. Return 1 if reset needed.
 */
static bool __cxl_handle_ras(struct cxl_dev_state *cxlds,
			     void __iomem *ras_base)
{
	u32 hl[CXL_HEADERLOG_SIZE_U32];
	void __iomem *addr;
	u32 status;
	u32 fe;

	if (!ras_base)
		return false;

	addr = ras_base + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
		return false;

	/* If multiple errors, log header points to first error from ctrl reg */
	if (hweight32(status) > 1) {
		void __iomem *rcc_addr =
			ras_base + CXL_RAS_CAP_CONTROL_OFFSET;

		fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
				   readl(rcc_addr)));
	} else {
		fe = status;
	}

	header_log_copy(ras_base, hl);
	trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);

	return true;
}

static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
{
	return __cxl_handle_ras(cxlds, cxlds->regs.ras);
}
#ifdef CONFIG_PCIEAER_CXL

static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
{
	struct cxl_rcrb_info *ri = &dport->rcrb;
	void __iomem *dport_aer = NULL;
	resource_size_t aer_phys;
	struct device *host;

	if (dport->rch && ri->aer_cap) {
		host = dport->reg_map.host;
		aer_phys = ri->aer_cap + ri->base;
		dport_aer = devm_cxl_iomap_block(host, aer_phys,
				sizeof(struct aer_capability_regs));
	}

	dport->regs.dport_aer = dport_aer;
}
static void cxl_dport_map_regs(struct cxl_dport *dport)
{
	struct cxl_register_map *map = &dport->reg_map;
	struct device *dev = dport->dport_dev;

	if (!map->component_map.ras.valid)
		dev_dbg(dev, "RAS registers not found\n");
	else if (cxl_map_component_regs(map, &dport->regs.component,
					BIT(CXL_CM_CAP_CAP_ID_RAS)))
		dev_dbg(dev, "Failed to map RAS capability.\n");

	if (dport->rch)
		cxl_dport_map_rch_aer(dport);
}
static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
{
	void __iomem *aer_base = dport->regs.dport_aer;
	struct pci_host_bridge *bridge;
	u32 aer_cmd_mask, aer_cmd;

	if (!aer_base)
		return;

	bridge = to_pci_host_bridge(dport->dport_dev);

	/*
	 * Disable RCH root port command interrupts.
	 * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
	 *
	 * This sequence may not be necessary. CXL spec states disabling
	 * the root cmd register's interrupts is required. But, PCI spec
	 * shows these are disabled by default on reset.
	 */
	if (bridge->native_aer) {
		aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
				PCI_ERR_ROOT_CMD_NONFATAL_EN |
				PCI_ERR_ROOT_CMD_FATAL_EN);
		aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
		aer_cmd &= ~aer_cmd_mask;
		writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
	}
}
void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
{
	struct device *dport_dev = dport->dport_dev;
	struct pci_host_bridge *host_bridge;

	host_bridge = to_pci_host_bridge(dport_dev);
	if (host_bridge->native_aer)
		dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);

	dport->reg_map.host = host;
	cxl_dport_map_regs(dport);

	if (dport->rch)
		cxl_disable_rch_root_ints(dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
				      struct cxl_dport *dport)
{
	return __cxl_handle_cor_ras(cxlds, dport->regs.ras);
}

static bool cxl_handle_rdport_ras(struct cxl_dev_state *cxlds,
				  struct cxl_dport *dport)
{
	return __cxl_handle_ras(cxlds, dport->regs.ras);
}
/*
 * Copy the AER capability registers using 32 bit read accesses.
 * This is necessary because RCRB AER capability is MMIO mapped. Clear the
 * status after copying.
 *
 * @aer_base: base address of AER capability block in RCRB
 * @aer_regs: destination for copying AER capability
 */
static bool cxl_rch_get_aer_info(void __iomem *aer_base,
				 struct aer_capability_regs *aer_regs)
{
	int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
	u32 *aer_regs_buf = (u32 *)aer_regs;
	int n;

	if (!aer_base)
		return false;

	/* Use readl() to guarantee 32-bit accesses */
	for (n = 0; n < read_cnt; n++)
		aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));

	writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
	writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);

	return true;
}
/* Get AER severity. Return false if there is no error. */
static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
				     int *severity)
{
	if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
		if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
			*severity = AER_FATAL;
		else
			*severity = AER_NONFATAL;
		return true;
	}

	if (aer_regs->cor_status & ~aer_regs->cor_mask) {
		*severity = AER_CORRECTABLE;
		return true;
	}

	return false;
}
static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	struct aer_capability_regs aer_regs;
	struct cxl_dport *dport;
	struct cxl_port *port;
	int severity;

	port = cxl_pci_find_port(pdev, &dport);
	if (!port)
		return;

	put_device(&port->dev);

	if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
		return;

	if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
		return;

	pci_print_aer(pdev, severity, &aer_regs);

	if (severity == AER_CORRECTABLE)
		cxl_handle_rdport_cor_ras(cxlds, dport);
	else
		cxl_handle_rdport_ras(cxlds, dport);
}

#else
static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
#endif
void cxl_cor_error_detected(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct device *dev = &cxlds->cxlmd->dev;

	scoped_guard(device, dev) {
		if (!dev->driver) {
			dev_warn(&pdev->dev,
				 "%s: memdev disabled, abort error handling\n",
				 dev_name(dev));
			return;
		}

		if (cxlds->rcd)
			cxl_handle_rdport_errors(cxlds);

		cxl_handle_endpoint_cor_ras(cxlds);
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
				    pci_channel_state_t state)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	bool ue;

	scoped_guard(device, dev) {
		if (!dev->driver) {
			dev_warn(&pdev->dev,
				 "%s: memdev disabled, abort error handling\n",
				 dev_name(dev));
			return PCI_ERS_RESULT_DISCONNECT;
		}

		if (cxlds->rcd)
			cxl_handle_rdport_errors(cxlds);
		/*
		 * A frozen channel indicates an impending reset which is fatal to
		 * CXL.mem operation, and will likely crash the system. On the off
		 * chance the situation is recoverable dump the status of the RAS
		 * capability registers and bounce the active state of the memdev.
		 */
		ue = cxl_handle_endpoint_ras(cxlds);
	}

	switch (state) {
	case pci_channel_io_normal:
		if (ue) {
			device_release_driver(dev);
			return PCI_ERS_RESULT_NEED_RESET;
		}
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(&pdev->dev,
			 "%s: frozen state error detected, disable CXL.mem\n",
			 dev_name(dev));
		device_release_driver(dev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(&pdev->dev,
			 "failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
static int cxl_flit_size(struct pci_dev *pdev)
{
	if (cxl_pci_flit_256(pdev))
		return 256;

	return 68;
}
/**
 * cxl_pci_get_latency - calculate the link latency for the PCIe link
 * @pdev: PCI device
 *
 * return: calculated latency or 0 for no latency
 *
 * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
 * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
 * LinkPropagationLatency is negligible, so 0 will be used
 * RetimerLatency is assumed to be negligible and 0 will be used
 * FlitLatency = FlitSize / LinkBandwidth
 * FlitSize is defined by spec. CXL rev3.0 4.2.1.
 * 68B flit is used up to 32GT/s. >32GT/s, 256B flit size is used.
 * The FlitLatency is converted to picoseconds.
 */
long cxl_pci_get_latency(struct pci_dev *pdev)
{
	long bw;

	bw = pcie_link_speed_mbps(pdev);
	if (bw < 0)
		return 0;
	bw /= BITS_PER_BYTE;

	return cxl_flit_size(pdev) * MEGA / bw;
}
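/*
 * Worked example (illustrative): at 32 GT/s pcie_link_speed_mbps() reports
 * 32000 Mb/s, or 4000 MB/s after dividing by BITS_PER_BYTE, so a 68B flit
 * costs 68 * MEGA / 4000 = 17000 ps (17 ns).
 */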