// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe PMU driver
 *
 * Copyright (C) 2021-2023 Alibaba Inc.
 */
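
/*
 * Usage overview (added note, illustrative): this driver registers one
 * perf PMU per matching Root Port, named "dwc_rootport_<bdf>". Assuming
 * an instance named dwc_rootport_3018, counting might look like:
 *
 *   perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ -- sleep 1
 *   perf stat -a -e dwc_rootport_3018/rx_memory_read,lane=2/ -- sleep 1
 *
 * The instance suffix is the Root Port's BDF value on the running system.
 */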

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define DWC_PCIE_VSEC_RAS_DES_ID                0x02

#define DWC_PCIE_EVENT_CNT_CTL                  0x8

/*
 * Event Counter Data Select includes two parts:
 * - 27-24: Group number (4-bit: 0..0x7)
 * - 23-16: Event number (8-bit: 0..0x13) within the Group
 *
 * Put them together as in the TRM.
 */
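
/*
 * Worked example (added note): lane event tx_ack_dllp below has
 * eventid 0x600, i.e. Group #6, Event 0x00; rx_memory_read (0x70C)
 * selects Group #7, Event 0x0C.
 */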
#define DWC_PCIE_CNT_EVENT_SEL                  GENMASK(27, 16)
#define DWC_PCIE_CNT_LANE_SEL                   GENMASK(11, 8)
#define DWC_PCIE_CNT_STATUS                     BIT(7)
#define DWC_PCIE_CNT_ENABLE                     GENMASK(4, 2)
#define DWC_PCIE_PER_EVENT_OFF                  0x1
#define DWC_PCIE_PER_EVENT_ON                   0x3
#define DWC_PCIE_EVENT_CLEAR                    GENMASK(1, 0)
#define DWC_PCIE_EVENT_PER_CLEAR                0x1

#define DWC_PCIE_EVENT_CNT_DATA                 0xC

#define DWC_PCIE_TIME_BASED_ANAL_CTL            0x10
#define DWC_PCIE_TIME_BASED_REPORT_SEL          GENMASK(31, 24)
#define DWC_PCIE_TIME_BASED_DURATION_SEL        GENMASK(15, 8)
#define DWC_PCIE_DURATION_MANUAL_CTL            0x0
#define DWC_PCIE_DURATION_1MS                   0x1
#define DWC_PCIE_DURATION_10MS                  0x2
#define DWC_PCIE_DURATION_100MS                 0x3
#define DWC_PCIE_DURATION_1S                    0x4
#define DWC_PCIE_DURATION_2S                    0x5
#define DWC_PCIE_DURATION_4S                    0x6
#define DWC_PCIE_DURATION_4US                   0xFF
#define DWC_PCIE_TIME_BASED_TIMER_START         BIT(0)
#define DWC_PCIE_TIME_BASED_CNT_ENABLE          0x1

#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW   0x14
#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH  0x18

/* Event attributes */
#define DWC_PCIE_CONFIG_EVENTID                 GENMASK(15, 0)
#define DWC_PCIE_CONFIG_TYPE                    GENMASK(19, 16)
#define DWC_PCIE_CONFIG_LANE                    GENMASK(27, 20)

#define DWC_PCIE_EVENT_ID(event)        FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config)
#define DWC_PCIE_EVENT_TYPE(event)      FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config)
#define DWC_PCIE_EVENT_LANE(event)      FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config)
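
/*
 * Encoding example (added note): perf packs these fields into
 * perf_event_attr::config, so "eventid=0x600,type=0x1,lane=2" from the
 * format attributes below yields config = (2 << 20) | (1 << 16) | 0x600.
 */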

enum dwc_pcie_event_type {
        DWC_PCIE_TIME_BASE_EVENT,
        DWC_PCIE_LANE_EVENT,
        DWC_PCIE_EVENT_TYPE_MAX,
};

#define DWC_PCIE_LANE_EVENT_MAX_PERIOD          GENMASK_ULL(31, 0)
#define DWC_PCIE_MAX_PERIOD                     GENMASK_ULL(63, 0)
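
/*
 * Note (added): lane event counters are 32 bits wide, while the
 * time-based counter is accumulated as a full 64-bit value from two
 * 32-bit halves, hence the two distinct wraparound masks above.
 */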

struct dwc_pcie_pmu {
        struct pmu              pmu;
        struct pci_dev          *pdev;          /* Root Port device */
        u16                     ras_des_offset;
        u32                     nr_lanes;

        struct list_head        pmu_node;
        struct hlist_node       cpuhp_node;
        struct perf_event       *event[DWC_PCIE_EVENT_TYPE_MAX];
        int                     on_cpu;
};

#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu))

static int dwc_pcie_pmu_hp_state;
static struct list_head dwc_pcie_dev_info_head =
                                LIST_HEAD_INIT(dwc_pcie_dev_info_head);
static bool notify;

struct dwc_pcie_dev_info {
        struct platform_device *plat_dev;
        struct pci_dev *pdev;
        struct list_head dev_node;
};

struct dwc_pcie_vendor_id {
        int vendor_id;
};

static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
        {.vendor_id = PCI_VENDOR_ID_ALIBABA },
        {}      /* terminator */
};

static ssize_t cpumask_show(struct device *dev,
                            struct device_attribute *attr,
                            char *buf)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL
};

static struct attribute_group dwc_pcie_cpumask_attr_group = {
        .attrs = dwc_pcie_pmu_cpumask_attrs,
};

struct dwc_pcie_format_attr {
        struct device_attribute attr;
        u64 field;
        int config;
};

PMU_FORMAT_ATTR(eventid, "config:0-15");
PMU_FORMAT_ATTR(type, "config:16-19");
PMU_FORMAT_ATTR(lane, "config:20-27");

static struct attribute *dwc_pcie_format_attrs[] = {
        &format_attr_type.attr,
        &format_attr_eventid.attr,
        &format_attr_lane.attr,
        NULL,
};

static struct attribute_group dwc_pcie_format_attrs_group = {
        .name = "format",
        .attrs = dwc_pcie_format_attrs,
};

struct dwc_pcie_event_attr {
        struct device_attribute attr;
        enum dwc_pcie_event_type type;
        u16 eventid;
        u8 lane;
};

static ssize_t dwc_pcie_event_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct dwc_pcie_event_attr *eattr;

        eattr = container_of(attr, typeof(*eattr), attr);

        if (eattr->type == DWC_PCIE_LANE_EVENT)
                return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n",
                                  eattr->eventid, eattr->type);
        else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT)
                return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n",
                                  eattr->eventid, eattr->type);

        return 0;
}

#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane)              \
        (&((struct dwc_pcie_event_attr[]) {{                            \
                .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \
                .type = _type,                                          \
                .eventid = _eventid,                                    \
                .lane = _lane,                                          \
        }})[0].attr.attr)

#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid)              \
        DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0)
#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid)                   \
        DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0)

static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
        /* Group #0 */
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),

        /* Group #1 */
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23),

        /*
         * Leave it to the user to specify the lane ID to avoid generating
         * a list of hundreds of events.
         */
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nullified_tlp, 0x604),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nullified_tlp, 0x605),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tlp, 0x606),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717),
        NULL
};
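
/*
 * Note (added): for lane events, sysfs shows "lane=?" and the user
 * supplies the lane at open time, e.g. (illustrative)
 * dwc_rootport_<bdf>/tx_ack_dllp,lane=0/; event_init() rejects lanes
 * outside 0..nr_lanes-1.
 */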

static const struct attribute_group dwc_pcie_event_attrs_group = {
        .name = "events",
        .attrs = dwc_pcie_pmu_time_event_attrs,
};

static const struct attribute_group *dwc_pcie_attr_groups[] = {
        &dwc_pcie_event_attrs_group,
        &dwc_pcie_format_attrs_group,
        &dwc_pcie_cpumask_attr_group,
        NULL
};

static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu,
                                           bool enable)
{
        struct pci_dev *pdev = pcie_pmu->pdev;
        u16 ras_des_offset = pcie_pmu->ras_des_offset;

        if (enable)
                pci_clear_and_set_config_dword(pdev,
                                ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
                                DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
        else
                pci_clear_and_set_config_dword(pdev,
                                ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
                                DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
}

static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu,
                                                 bool enable)
{
        struct pci_dev *pdev = pcie_pmu->pdev;
        u16 ras_des_offset = pcie_pmu->ras_des_offset;

        pci_clear_and_set_config_dword(pdev,
                        ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL,
                        DWC_PCIE_TIME_BASED_TIMER_START, enable);
}

static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        struct pci_dev *pdev = pcie_pmu->pdev;
        u16 ras_des_offset = pcie_pmu->ras_des_offset;
        u32 val;

        pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val);

        return val;
}

static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        struct pci_dev *pdev = pcie_pmu->pdev;
        int event_id = DWC_PCIE_EVENT_ID(event);
        u16 ras_des_offset = pcie_pmu->ras_des_offset;
        u32 lo, hi, ss;
        u64 val;

        /*
         * The 64-bit value of the data counter is spread across two
         * registers that are not synchronized. In order to read them
         * atomically, ensure that the high 32 bits match before and after
         * reading the low 32 bits.
         */
        pci_read_config_dword(pdev,
                ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi);
        do {
                /* snapshot the high 32 bits */
                ss = hi;

                pci_read_config_dword(
                        pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW,
                        &lo);
                pci_read_config_dword(
                        pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH,
                        &hi);
        } while (hi != ss);

        val = ((u64)hi << 32) | lo;
        /*
         * The Group#1 events measure the amount of data processed in
         * 16-byte units. Simplify the end-user interface by multiplying
         * the counter at the point of read.
         */
        if (event_id >= 0x20 && event_id <= 0x23)
                val *= 16;

        return val;
}
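
/*
 * Worked example (added note): for Rx_PCIe_TLP_Data_Payload (event_id
 * 0x21), a raw time-based count of 0x10 is reported as 0x100, i.e.
 * 256 bytes of TLP payload.
 */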

static void dwc_pcie_pmu_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
        u64 delta, prev, now = 0;
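
        /*
         * Added note: standard perf counter-read pattern; retry if
         * prev_count was updated concurrently so that each delta is
         * accumulated exactly once.
         */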
        do {
                prev = local64_read(&hwc->prev_count);

                if (type == DWC_PCIE_LANE_EVENT)
                        now = dwc_pcie_pmu_read_lane_event_counter(event);
                else if (type == DWC_PCIE_TIME_BASE_EVENT)
                        now = dwc_pcie_pmu_read_time_based_counter(event);

        } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

        delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
        /* 32-bit counter for Lane Event Counting */
        if (type == DWC_PCIE_LANE_EVENT)
                delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD;

        local64_add(delta, &event->count);
}

static int dwc_pcie_pmu_event_init(struct perf_event *event)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
        struct perf_event *sibling;
        u32 lane;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* We don't support sampling */
        if (is_sampling_event(event))
                return -EINVAL;

        /* We cannot support task bound events */
        if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        if (event->group_leader != event &&
            !is_software_event(event->group_leader))
                return -EINVAL;

        for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu && !is_software_event(sibling))
                        return -EINVAL;
        }

        if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX)
                return -EINVAL;

        if (type == DWC_PCIE_LANE_EVENT) {
                lane = DWC_PCIE_EVENT_LANE(event);
                if (lane < 0 || lane >= pcie_pmu->nr_lanes)
                        return -EINVAL;
        }

        event->cpu = pcie_pmu->on_cpu;

        return 0;
}

static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);

        hwc->state = 0;
        local64_set(&hwc->prev_count, 0);

        if (type == DWC_PCIE_LANE_EVENT)
                dwc_pcie_pmu_lane_event_enable(pcie_pmu, true);
        else if (type == DWC_PCIE_TIME_BASE_EVENT)
                dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true);
}

static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
        struct hw_perf_event *hwc = &event->hw;

        if (event->hw.state & PERF_HES_STOPPED)
                return;

        if (type == DWC_PCIE_LANE_EVENT)
                dwc_pcie_pmu_lane_event_enable(pcie_pmu, false);
        else if (type == DWC_PCIE_TIME_BASE_EVENT)
                dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false);

        dwc_pcie_pmu_event_update(event);
        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        struct pci_dev *pdev = pcie_pmu->pdev;
        struct hw_perf_event *hwc = &event->hw;
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
        int event_id = DWC_PCIE_EVENT_ID(event);
        int lane = DWC_PCIE_EVENT_LANE(event);
        u16 ras_des_offset = pcie_pmu->ras_des_offset;
        u32 ctrl;

        /* one counter per event type; fail if it is already in use */
        if (pcie_pmu->event[type])
                return -ENOSPC;

        pcie_pmu->event[type] = event;
        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

        if (type == DWC_PCIE_LANE_EVENT) {
                /* EVENT_COUNTER_DATA_REG needs to be cleared manually */
                ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
                        FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
                        FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) |
                        FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
                pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
                                       ctrl);
        } else if (type == DWC_PCIE_TIME_BASE_EVENT) {
                /*
                 * TIME_BASED_ANAL_DATA_REG is a 64-bit register and can
                 * safely be used with any manually controlled duration;
                 * it is cleared when the next measurement starts.
                 */
                ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) |
                        FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL,
                                   DWC_PCIE_DURATION_MANUAL_CTL) |
                        DWC_PCIE_TIME_BASED_CNT_ENABLE;
                pci_write_config_dword(
                        pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl);
        }

        if (flags & PERF_EF_START)
                dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD);

        perf_event_update_userpage(event);

        return 0;
}

static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);

        dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE);
        perf_event_update_userpage(event);
        pcie_pmu->event[type] = NULL;
}

static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node)
{
        cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node);
}

/*
 * Find the bound RAS DES capability device info of a PCI device.
 * @pdev: The PCI device.
 */
static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev)
{
        struct dwc_pcie_dev_info *dev_info;

        list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node)
                if (dev_info->pdev == pdev)
                        return dev_info;

        return NULL;
}

static void dwc_pcie_unregister_pmu(void *data)
{
        struct dwc_pcie_pmu *pcie_pmu = data;

        perf_pmu_unregister(&pcie_pmu->pmu);
}

static bool dwc_pcie_match_des_cap(struct pci_dev *pdev)
{
        const struct dwc_pcie_vendor_id *vid;
        u16 vsec = 0;
        u32 val;

        if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
                return false;

        for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
                vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
                                                DWC_PCIE_VSEC_RAS_DES_ID);
                if (vsec)
                        break;
        }
        if (!vsec)
                return false;

        pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
        if (PCI_VNDR_HEADER_REV(val) != 0x04)
                return false;

        pci_dbg(pdev,
                "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
        return true;
}

static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
{
        platform_device_unregister(dev_info->plat_dev);
        list_del(&dev_info->dev_node);
        kfree(dev_info);
}

static int dwc_pcie_register_dev(struct pci_dev *pdev)
{
        struct platform_device *plat_dev;
        struct dwc_pcie_dev_info *dev_info;
        u32 bdf;

        bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
        plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf,
                                                 pdev, sizeof(*pdev));
        if (IS_ERR(plat_dev))
                return PTR_ERR(plat_dev);

        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info)
                return -ENOMEM;

        /* Cache platform device to handle pci device hotplug */
        dev_info->plat_dev = plat_dev;
        dev_info->pdev = pdev;
        list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head);

        return 0;
}
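
/*
 * Design note (added): each matching Root Port gets a "dwc_pcie_pmu"
 * platform device carrying the pci_dev as platform data. The probe
 * below can then rely on devm actions for teardown, and the bus
 * notifier keeps this list in sync across PCI hotplug.
 */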

static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dwc_pcie_dev_info *dev_info;

        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                if (!dwc_pcie_match_des_cap(pdev))
                        return NOTIFY_DONE;
                if (dwc_pcie_register_dev(pdev))
                        return NOTIFY_BAD;
                break;
        case BUS_NOTIFY_DEL_DEVICE:
                dev_info = dwc_pcie_find_dev_info(pdev);
                if (!dev_info)
                        return NOTIFY_DONE;
                dwc_pcie_unregister_dev(dev_info);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block dwc_pcie_pmu_nb = {
        .notifier_call = dwc_pcie_pmu_notifier,
};

static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
{
        struct pci_dev *pdev = plat_dev->dev.platform_data;
        struct dwc_pcie_pmu *pcie_pmu;
        char *name;
        u32 bdf, val;
        u16 vsec;
        int ret;

        vsec = pci_find_vsec_capability(pdev, pdev->vendor,
                                        DWC_PCIE_VSEC_RAS_DES_ID);
        pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
        bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
        name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf);
        if (!name)
                return -ENOMEM;

        pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
        if (!pcie_pmu)
                return -ENOMEM;

        pcie_pmu->pdev = pdev;
        pcie_pmu->ras_des_offset = vsec;
        pcie_pmu->nr_lanes = pcie_get_width_cap(pdev);
        pcie_pmu->on_cpu = -1;
        pcie_pmu->pmu = (struct pmu){
                .name           = name,
                .parent         = &pdev->dev,
                .module         = THIS_MODULE,
                .attr_groups    = dwc_pcie_attr_groups,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = dwc_pcie_pmu_event_init,
                .add            = dwc_pcie_pmu_event_add,
                .del            = dwc_pcie_pmu_event_del,
                .start          = dwc_pcie_pmu_event_start,
                .stop           = dwc_pcie_pmu_event_stop,
                .read           = dwc_pcie_pmu_event_update,
        };

        /* Add this instance to the list used by the offline callback */
        ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state,
                                       &pcie_pmu->cpuhp_node);
        if (ret) {
                pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf);
                return ret;
        }

        /* Unwind when platform driver removes */
        ret = devm_add_action_or_reset(&plat_dev->dev,
                                       dwc_pcie_pmu_remove_cpuhp_instance,
                                       &pcie_pmu->cpuhp_node);
        if (ret)
                return ret;

        ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
        if (ret) {
                pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf);
                return ret;
        }

        ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
                                       pcie_pmu);
        if (ret)
                return ret;

        return 0;
}

static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
        struct dwc_pcie_pmu *pcie_pmu;

        pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
        if (pcie_pmu->on_cpu == -1)
                pcie_pmu->on_cpu = cpumask_local_spread(
                        0, dev_to_node(&pcie_pmu->pdev->dev));

        return 0;
}

static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
        struct dwc_pcie_pmu *pcie_pmu;
        struct pci_dev *pdev;
        unsigned int target;
        int node;

        pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
        /* Nothing to do if this CPU doesn't own the PMU */
        if (cpu != pcie_pmu->on_cpu)
                return 0;

        pcie_pmu->on_cpu = -1;
        pdev = pcie_pmu->pdev;
        node = dev_to_node(&pdev->dev);

        target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                target = cpumask_any_but(cpu_online_mask, cpu);

        if (target >= nr_cpu_ids) {
                pci_err(pdev, "There is no CPU to set\n");
                return -1;
        }

        /* This PMU does NOT support interrupt, just migrate context. */
        perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
        pcie_pmu->on_cpu = target;

        return 0;
}
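
/*
 * Design note (added): the PMU is counter-only (no overflow interrupt),
 * so hotplug handling just rebinds the event context to another online
 * CPU, preferring one on the Root Port's NUMA node.
 */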

static struct platform_driver dwc_pcie_pmu_driver = {
        .probe = dwc_pcie_pmu_probe,
        .driver = {.name = "dwc_pcie_pmu",},
};

static int __init dwc_pcie_pmu_init(void)
{
        struct pci_dev *pdev = NULL;
        bool found = false;
        int ret;

        for_each_pci_dev(pdev) {
                if (!dwc_pcie_match_des_cap(pdev))
                        continue;

                ret = dwc_pcie_register_dev(pdev);
                if (ret) {
                        pci_dev_put(pdev);
                        return ret;
                }

                found = true;
        }
        if (!found)
                return -ENODEV;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      "perf/dwc_pcie_pmu:online",
                                      dwc_pcie_pmu_online_cpu,
                                      dwc_pcie_pmu_offline_cpu);
        if (ret < 0)
                return ret;

        dwc_pcie_pmu_hp_state = ret;

        ret = platform_driver_register(&dwc_pcie_pmu_driver);
        if (ret)
                goto platform_driver_register_err;

        ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
        if (ret)
                goto platform_driver_register_err;
        notify = true;

        return 0;

platform_driver_register_err:
        cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);

        return ret;
}

static void __exit dwc_pcie_pmu_exit(void)
{
        struct dwc_pcie_dev_info *dev_info, *tmp;

        if (notify)
                bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
        list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
                dwc_pcie_unregister_dev(dev_info);
        platform_driver_unregister(&dwc_pcie_pmu_driver);
        cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
}

module_init(dwc_pcie_pmu_init);
module_exit(dwc_pcie_pmu_exit);

MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller");
MODULE_AUTHOR("Shuai Xue <xueshuai@linux.alibaba.com>");
MODULE_LICENSE("GPL v2");