// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2018-2021 Intel Corporation

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/peci.h>
#include <linux/peci-cpu.h>
#include <linux/units.h>
#include <linux/workqueue.h>

#include "common.h"

#define DIMM_MASK_CHECK_DELAY_JIFFIES	msecs_to_jiffies(5000)

/* Max number of channel ranks and DIMM index per channel */
#define CHAN_RANK_MAX_ON_HSX	8
#define DIMM_IDX_MAX_ON_HSX	3
#define CHAN_RANK_MAX_ON_BDX	4
#define DIMM_IDX_MAX_ON_BDX	3
#define CHAN_RANK_MAX_ON_BDXD	2
#define DIMM_IDX_MAX_ON_BDXD	2
#define CHAN_RANK_MAX_ON_SKX	6
#define DIMM_IDX_MAX_ON_SKX	2
#define CHAN_RANK_MAX_ON_ICX	8
#define DIMM_IDX_MAX_ON_ICX	2
#define CHAN_RANK_MAX_ON_ICXD	4
#define DIMM_IDX_MAX_ON_ICXD	2
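
/*
 * Size the per-CPU bookkeeping for the worst case: of the generations above,
 * HSX has the largest chan_rank * dimm_idx product (8 * 3 = 24).
 */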
#define CHAN_RANK_MAX		CHAN_RANK_MAX_ON_HSX
#define DIMM_IDX_MAX		DIMM_IDX_MAX_ON_HSX
#define DIMM_NUMS_MAX		(CHAN_RANK_MAX * DIMM_IDX_MAX)

#define CPU_SEG_MASK		GENMASK(23, 16)
#define GET_CPU_SEG(x)		(((x) & CPU_SEG_MASK) >> 16)
#define CPU_BUS_MASK		GENMASK(7, 0)
#define GET_CPU_BUS(x)		((x) & CPU_BUS_MASK)

#define DIMM_TEMP_MAX		GENMASK(15, 8)
#define DIMM_TEMP_CRIT		GENMASK(23, 16)
#define GET_TEMP_MAX(x)		(((x) & DIMM_TEMP_MAX) >> 8)
#define GET_TEMP_CRIT(x)	(((x) & DIMM_TEMP_CRIT) >> 16)
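
/*
 * Illustrative decode of a thresholds dword (values made up, only the field
 * layout matters): data = 0x00545100 gives GET_TEMP_MAX(data) = 0x51 (81
 * degrees C) and GET_TEMP_CRIT(data) = 0x54 (84 degrees C).
 */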

#define NO_DIMM_RETRY_COUNT_MAX	5

struct peci_dimmtemp;

struct dimm_info {
	int chan_rank_max;
	int dimm_idx_max;
	u8 min_peci_revision;
	int (*read_thresholds)(struct peci_dimmtemp *priv, int dimm_order,
			       int chan_rank, u32 *data);
};

struct peci_dimm_thresholds {
	long temp_max;
	long temp_crit;
	struct peci_sensor_state state;
};

enum peci_dimm_threshold_type {
	temp_max_type,
	temp_crit_type,
};

struct peci_dimmtemp {
	struct peci_device *peci_dev;
	struct device *dev;
	const char *name;
	const struct dimm_info *gen_info;
	struct delayed_work detect_work;
	struct {
		struct peci_sensor_data temp;
		struct peci_dimm_thresholds thresholds;
	} dimm[DIMM_NUMS_MAX];
	char **dimmtemp_label;
	DECLARE_BITMAP(dimm_mask, DIMM_NUMS_MAX);
	u8 no_dimm_retry_count;
};
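
/*
 * A DDR DIMM temperature PCS word carries one temperature byte per DIMM index
 * of the channel rank; __dimm_temp() extracts the byte for a given index
 * (e.g. dimm_order = 1 selects bits 15:8).
 */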
static u8 __dimm_temp(u32 reg, int dimm_order)
{
	return (reg >> (dimm_order * 8)) & 0xff;
}
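
/*
 * Sensor channels are numbered chan_rank-major: with dimm_idx_max = 3,
 * dimm_no = 7 decomposes into chan_rank = 2, dimm_order = 1.
 */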
static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no, long *val)
{
	int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
	int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
	int ret = 0;
	u32 data;

	mutex_lock(&priv->dimm[dimm_no].temp.state.lock);
	if (!peci_sensor_need_update(&priv->dimm[dimm_no].temp.state))
		goto skip_update;

	ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &data);
	if (ret)
		goto unlock;

	priv->dimm[dimm_no].temp.value = __dimm_temp(data, dimm_order) * MILLIDEGREE_PER_DEGREE;

	peci_sensor_mark_updated(&priv->dimm[dimm_no].temp.state);

skip_update:
	*val = priv->dimm[dimm_no].temp.value;
unlock:
	mutex_unlock(&priv->dimm[dimm_no].temp.state.lock);

	return ret;
}

static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
{
	int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
	int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
	u32 data;
	int ret;

	if (!peci_sensor_need_update(&priv->dimm[dimm_no].thresholds.state))
		return 0;

	ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
	if (ret == -ENODATA) /* Use default or previous value */
		return 0;
	if (ret)
		return ret;

	priv->dimm[dimm_no].thresholds.temp_max = GET_TEMP_MAX(data) * MILLIDEGREE_PER_DEGREE;
	priv->dimm[dimm_no].thresholds.temp_crit = GET_TEMP_CRIT(data) * MILLIDEGREE_PER_DEGREE;

	peci_sensor_mark_updated(&priv->dimm[dimm_no].thresholds.state);

	return 0;
}

static int get_dimm_thresholds(struct peci_dimmtemp *priv, enum peci_dimm_threshold_type type,
			       int dimm_no, long *val)
{
	int ret;

	mutex_lock(&priv->dimm[dimm_no].thresholds.state.lock);
	ret = update_thresholds(priv, dimm_no);
	if (ret)
		goto unlock;

	switch (type) {
	case temp_max_type:
		*val = priv->dimm[dimm_no].thresholds.temp_max;
		break;
	case temp_crit_type:
		*val = priv->dimm[dimm_no].thresholds.temp_crit;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
unlock:
	mutex_unlock(&priv->dimm[dimm_no].thresholds.state.lock);

	return ret;
}

static int dimmtemp_read_string(struct device *dev,
				enum hwmon_sensor_types type,
				u32 attr, int channel, const char **str)
{
	struct peci_dimmtemp *priv = dev_get_drvdata(dev);

	if (attr != hwmon_temp_label)
		return -EOPNOTSUPP;

	*str = (const char *)priv->dimmtemp_label[channel];

	return 0;
}

static int dimmtemp_read(struct device *dev, enum hwmon_sensor_types type,
			 u32 attr, int channel, long *val)
{
	struct peci_dimmtemp *priv = dev_get_drvdata(dev);

	switch (attr) {
	case hwmon_temp_input:
		return get_dimm_temp(priv, channel, val);
	case hwmon_temp_max:
		return get_dimm_thresholds(priv, temp_max_type, channel, val);
	case hwmon_temp_crit:
		return get_dimm_thresholds(priv, temp_crit_type, channel, val);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static umode_t dimmtemp_is_visible(const void *data, enum hwmon_sensor_types type,
				   u32 attr, int channel)
{
	const struct peci_dimmtemp *priv = data;

	if (test_bit(channel, priv->dimm_mask))
		return 0444;

	return 0;
}

static const struct hwmon_ops peci_dimmtemp_ops = {
	.is_visible = dimmtemp_is_visible,
	.read_string = dimmtemp_read_string,
	.read = dimmtemp_read,
};
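
/*
 * Populated-DIMM detection: a per-rank PCS read reports one temperature byte
 * per DIMM index, and a non-zero byte is taken to mean the slot is populated.
 * Bit chan_rank * dimm_idx_max + dimm_idx of dimm_mask tracks each slot, so
 * e.g. chan_rank = 2, dimm_idx = 1 with dimm_idx_max = 3 sets bit 7.
 */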
static int check_populated_dimms(struct peci_dimmtemp *priv)
{
	int chan_rank_max = priv->gen_info->chan_rank_max;
	int dimm_idx_max = priv->gen_info->dimm_idx_max;
	u32 chan_rank_empty = 0;
	u64 dimm_mask = 0;
	int chan_rank, dimm_idx, ret;
	u32 pcs;

	BUILD_BUG_ON(BITS_PER_TYPE(chan_rank_empty) < CHAN_RANK_MAX);
	BUILD_BUG_ON(BITS_PER_TYPE(dimm_mask) < DIMM_NUMS_MAX);
	if (chan_rank_max * dimm_idx_max > DIMM_NUMS_MAX) {
		WARN_ONCE(1, "Unsupported number of DIMMs - chan_rank_max: %d, dimm_idx_max: %d",
			  chan_rank_max, dimm_idx_max);
		return -EINVAL;
	}

	for (chan_rank = 0; chan_rank < chan_rank_max; chan_rank++) {
		ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &pcs);
		if (ret) {
			/*
			 * Overall, we expect either success or -EINVAL in
			 * order to determine whether DIMM is populated or not.
			 * For anything else we fall back to deferring the
			 * detection to be performed at a later point in time.
			 */
			if (ret == -EINVAL) {
				chan_rank_empty |= BIT(chan_rank);
				continue;
			}

			return -EAGAIN;
		}

		for (dimm_idx = 0; dimm_idx < dimm_idx_max; dimm_idx++)
			if (__dimm_temp(pcs, dimm_idx))
				dimm_mask |= BIT(chan_rank * dimm_idx_max + dimm_idx);
	}

	/*
	 * If we got all -EINVALs, it means that the CPU doesn't have any
	 * DIMMs. Unfortunately, it may also happen at the very start of
	 * host platform boot. Retrying a couple of times lets us make sure
	 * that the state is persistent.
	 */
	if (chan_rank_empty == GENMASK(chan_rank_max - 1, 0)) {
		if (priv->no_dimm_retry_count < NO_DIMM_RETRY_COUNT_MAX) {
			priv->no_dimm_retry_count++;

			return -EAGAIN;
		}

		return -ENODEV;
	}

	/*
	 * It's possible that memory training is not done yet. In this case we
	 * defer the detection to be performed at a later point in time.
	 */
	if (!dimm_mask) {
		priv->no_dimm_retry_count = 0;
		return -EAGAIN;
	}

	dev_dbg(priv->dev, "Scanned populated DIMMs: %#llx\n", dimm_mask);

	bitmap_from_u64(priv->dimm_mask, dimm_mask);

	return 0;
}
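
/*
 * Labels follow the "DIMM <rank letter><index>" convention: with
 * dimm_idx_max = 3, channel 7 becomes rank 2, index 1 and is labeled
 * "DIMM C2".
 */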
static int create_dimm_temp_label(struct peci_dimmtemp *priv, int chan)
{
	int rank = chan / priv->gen_info->dimm_idx_max;
	int idx = chan % priv->gen_info->dimm_idx_max;

	priv->dimmtemp_label[chan] = devm_kasprintf(priv->dev, GFP_KERNEL,
						    "DIMM %c%d", 'A' + rank,
						    idx + 1);
	if (!priv->dimmtemp_label[chan])
		return -ENOMEM;

	return 0;
}

static const u32 peci_dimmtemp_temp_channel_config[] = {
	[0 ... DIMM_NUMS_MAX - 1] = HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
	0
};

static const struct hwmon_channel_info peci_dimmtemp_temp_channel = {
	.type = hwmon_temp,
	.config = peci_dimmtemp_temp_channel_config,
};

static const struct hwmon_channel_info *peci_dimmtemp_temp_info[] = {
	&peci_dimmtemp_temp_channel,
	NULL
};

static const struct hwmon_chip_info peci_dimmtemp_chip_info = {
	.ops = &peci_dimmtemp_ops,
	.info = peci_dimmtemp_temp_info,
};

static int create_dimm_temp_info(struct peci_dimmtemp *priv)
{
	int ret, i, channels;
	struct device *dev;

	/*
	 * We expect to either find populated DIMMs and carry on with creating
	 * sensors, or find out that there are no DIMMs populated.
	 * All other states mean that the platform never reached the state that
	 * allows to check DIMM state - causing us to retry later on.
	 */
	ret = check_populated_dimms(priv);
	if (ret == -ENODEV) {
		dev_dbg(priv->dev, "No DIMMs found\n");
		return 0;
	} else if (ret) {
		schedule_delayed_work(&priv->detect_work, DIMM_MASK_CHECK_DELAY_JIFFIES);
		dev_dbg(priv->dev, "Deferred populating DIMM temp info\n");
		return ret;
	}

	channels = priv->gen_info->chan_rank_max * priv->gen_info->dimm_idx_max;

	priv->dimmtemp_label = devm_kzalloc(priv->dev, channels * sizeof(char *), GFP_KERNEL);
	if (!priv->dimmtemp_label)
		return -ENOMEM;

	for_each_set_bit(i, priv->dimm_mask, DIMM_NUMS_MAX) {
		ret = create_dimm_temp_label(priv, i);
		if (ret)
			return ret;
		mutex_init(&priv->dimm[i].thresholds.state.lock);
		mutex_init(&priv->dimm[i].temp.state.lock);
	}

	dev = devm_hwmon_device_register_with_info(priv->dev, priv->name, priv,
						   &peci_dimmtemp_chip_info, NULL);
	if (IS_ERR(dev)) {
		dev_err(priv->dev, "Failed to register hwmon device\n");
		return PTR_ERR(dev);
	}

	dev_dbg(priv->dev, "%s: sensor '%s'\n", dev_name(dev), priv->name);

	return 0;
}

static void create_dimm_temp_info_delayed(struct work_struct *work)
{
	struct peci_dimmtemp *priv = container_of(to_delayed_work(work),
						  struct peci_dimmtemp,
						  detect_work);
	int ret;

	ret = create_dimm_temp_info(priv);
	if (ret && ret != -EAGAIN)
		dev_err(priv->dev, "Failed to populate DIMM temp info\n");
}
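
/*
 * Registered via devm_add_action_or_reset() in probe() so the detect work is
 * canceled before the devm-allocated priv it dereferences is freed on unbind.
 */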
static void remove_delayed_work(void *_priv)
{
	struct peci_dimmtemp *priv = _priv;

	cancel_delayed_work_sync(&priv->detect_work);
}

static int peci_dimmtemp_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
{
	struct device *dev = &adev->dev;
	struct peci_device *peci_dev = to_peci_device(dev->parent);
	struct peci_dimmtemp *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_dimmtemp.cpu%d",
				    peci_dev->info.socket_id);
	if (!priv->name)
		return -ENOMEM;

	priv->dev = dev;
	priv->peci_dev = peci_dev;
	priv->gen_info = (const struct dimm_info *)id->driver_data;

	/*
	 * This is just a sanity check. Since we're using commands that are
	 * guaranteed to be supported on a given platform, we should never see
	 * revision lower than expected.
	 */
	if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision)
		dev_warn(priv->dev,
			 "Unexpected PECI revision %#x, some features may be unavailable\n",
			 peci_dev->info.peci_revision);

	INIT_DELAYED_WORK(&priv->detect_work, create_dimm_temp_info_delayed);

	ret = devm_add_action_or_reset(priv->dev, remove_delayed_work, priv);
	if (ret)
		return ret;

	ret = create_dimm_temp_info(priv);
	if (ret && ret != -EAGAIN) {
		dev_err(dev, "Failed to populate DIMM temp info\n");
		return ret;
	}

	return 0;
}

static int
read_thresholds_hsx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 20, Function 0: IMC 0 channel 0 -> rank 0
	 * Device 20, Function 1: IMC 0 channel 1 -> rank 1
	 * Device 21, Function 0: IMC 0 channel 2 -> rank 2
	 * Device 21, Function 1: IMC 0 channel 3 -> rank 3
	 * Device 23, Function 0: IMC 1 channel 0 -> rank 4
	 * Device 23, Function 1: IMC 1 channel 1 -> rank 5
	 * Device 24, Function 0: IMC 1 channel 2 -> rank 6
	 * Device 24, Function 1: IMC 1 channel 3 -> rank 7
	 */
	dev = 20 + chan_rank / 2 + chan_rank / 4;
	func = chan_rank % 2;
	reg = 0x120 + dimm_order * 4;
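	/* e.g. chan_rank = 5: dev = 20 + 2 + 1 = 23, func = 1, matching rank 5 in the table above */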

	ret = peci_pci_local_read(priv->peci_dev, 1, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_bdxd(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 10, Function 2: IMC 0 channel 0 -> rank 0
	 * Device 10, Function 6: IMC 0 channel 1 -> rank 1
	 * Device 12, Function 2: IMC 1 channel 0 -> rank 2
	 * Device 12, Function 6: IMC 1 channel 1 -> rank 3
	 */
	dev = 10 + chan_rank / 2 * 2;
	func = (chan_rank % 2) ? 6 : 2;
	reg = 0x120 + dimm_order * 4;
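	/* e.g. chan_rank = 3: dev = 10 + 1 * 2 = 12, func = 6, matching rank 3 in the table above */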

	ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_skx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 10, Function 2: IMC 0 channel 0 -> rank 0
	 * Device 10, Function 6: IMC 0 channel 1 -> rank 1
	 * Device 11, Function 2: IMC 0 channel 2 -> rank 2
	 * Device 12, Function 2: IMC 1 channel 0 -> rank 3
	 * Device 12, Function 6: IMC 1 channel 1 -> rank 4
	 * Device 13, Function 2: IMC 1 channel 2 -> rank 5
	 */
	dev = 10 + chan_rank / 3 * 2 + (chan_rank % 3 == 2 ? 1 : 0);
	func = chan_rank % 3 == 1 ? 6 : 2;
	reg = 0x120 + dimm_order * 4;
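	/* e.g. chan_rank = 4: dev = 10 + 1 * 2 + 0 = 12, func = 6, matching rank 4 in the table above */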

	ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u32 reg_val;
	u64 offset;
	int ret;
	u8 dev;

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
	if (ret || !(reg_val & BIT(31)))
		return -ENODATA; /* Use default or previous value */

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
	if (ret)
		return -ENODATA; /* Use default or previous value */

	/*
	 * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
	 * Device 26, Offset 264e0: IMC 0 channel 1 -> rank 1
	 * Device 27, Offset 224e0: IMC 1 channel 0 -> rank 2
	 * Device 27, Offset 264e0: IMC 1 channel 1 -> rank 3
	 * Device 28, Offset 224e0: IMC 2 channel 0 -> rank 4
	 * Device 28, Offset 264e0: IMC 2 channel 1 -> rank 5
	 * Device 29, Offset 224e0: IMC 3 channel 0 -> rank 6
	 * Device 29, Offset 264e0: IMC 3 channel 1 -> rank 7
	 */
	dev = 26 + chan_rank / 2;
	offset = 0x224e0 + dimm_order * 4 + (chan_rank % 2) * 0x4000;
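	/* e.g. chan_rank = 3: dev = 26 + 1 = 27, offset base = 0x224e0 + 0x4000 = 0x264e0 */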

	ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val),
			     dev, 0, offset, data);
	if (ret)
		return ret;

	return 0;
}

static const struct dimm_info dimm_hsx = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_HSX,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_HSX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_hsx,
};

static const struct dimm_info dimm_bdx = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_BDX,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_BDX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_hsx,
};

static const struct dimm_info dimm_bdxd = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_BDXD,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_BDXD,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_bdxd,
};

static const struct dimm_info dimm_skx = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_SKX,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_SKX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_skx,
};

static const struct dimm_info dimm_icx = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_ICX,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_ICX,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_icx,
};

static const struct dimm_info dimm_icxd = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_ICXD,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_ICXD,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_icx,
};
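
/*
 * Each auxiliary device id carries its generation's dimm_info as driver_data;
 * probe() picks it up via id->driver_data.
 */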
static const struct auxiliary_device_id peci_dimmtemp_ids[] = {
	{
		.name = "peci_cpu.dimmtemp.hsx",
		.driver_data = (kernel_ulong_t)&dimm_hsx,
	},
	{
		.name = "peci_cpu.dimmtemp.bdx",
		.driver_data = (kernel_ulong_t)&dimm_bdx,
	},
	{
		.name = "peci_cpu.dimmtemp.bdxd",
		.driver_data = (kernel_ulong_t)&dimm_bdxd,
	},
	{
		.name = "peci_cpu.dimmtemp.skx",
		.driver_data = (kernel_ulong_t)&dimm_skx,
	},
	{
		.name = "peci_cpu.dimmtemp.icx",
		.driver_data = (kernel_ulong_t)&dimm_icx,
	},
	{
		.name = "peci_cpu.dimmtemp.icxd",
		.driver_data = (kernel_ulong_t)&dimm_icxd,
	},
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, peci_dimmtemp_ids);

static struct auxiliary_driver peci_dimmtemp_driver = {
	.probe = peci_dimmtemp_probe,
	.id_table = peci_dimmtemp_ids,
};

module_auxiliary_driver(peci_dimmtemp_driver);

MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
MODULE_DESCRIPTION("PECI dimmtemp driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI_CPU);