// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                                  \
        for ((cmd) = &cxl_mem_commands[0];                                     \
             ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                        \
        [CXL_MEM_COMMAND_ID_##_id] = {                                         \
                .info = {                                                      \
                        .id = CXL_MEM_COMMAND_ID_##_id,                        \
                        .size_in = sin,                                        \
                        .size_out = sout,                                      \
                },                                                             \
                .opcode = CXL_MBOX_OP_##_id,                                   \
                .flags = _flags,                                               \
        }

#define CXL_VARIABLE_PAYLOAD    ~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
        CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
        CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
        CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
        CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
        CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
        CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
        CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
        CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
        CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
        CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
        CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
        CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
        CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
        CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
        CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
        CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};
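
/*
 * For reference, a sketch of what one CXL_CMD() entry above expands to,
 * using the GET_LSA row as the example:
 *
 *	[CXL_MEM_COMMAND_ID_GET_LSA] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_GET_LSA,
 *			.size_in = 0x8,
 *			.size_out = CXL_VARIABLE_PAYLOAD,
 *		},
 *		.opcode = CXL_MBOX_OP_GET_LSA,
 *		.flags = 0,
 *	},
 */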

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 *
 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
 * driver orchestration for safety.
 */
static u16 cxl_disabled_raw_commands[] = {
        CXL_MBOX_OP_ACTIVATE_FW,
        CXL_MBOX_OP_SET_PARTITION_INFO,
        CXL_MBOX_OP_SET_LSA,
        CXL_MBOX_OP_SET_SHUTDOWN_STATE,
        CXL_MBOX_OP_SCAN_MEDIA,
        CXL_MBOX_OP_GET_SCAN_MEDIA,
        CXL_MBOX_OP_GET_POISON,
        CXL_MBOX_OP_INJECT_POISON,
        CXL_MBOX_OP_CLEAR_POISON,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
        0x44, /* Sanitize */
        0x45, /* Persistent Memory Data-at-rest Security */
        0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
                if (security_command_sets[i] == (opcode >> 8))
                        return true;
        return false;
}
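
/*
 * For example, CXL_MBOX_OP_SANITIZE (opcode 0x4400) has command-set byte
 * 0x4400 >> 8 == 0x44, which matches the Sanitize entry in
 * security_command_sets[] above, so it is treated as a security command.
 */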

static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
                                         u16 opcode)
{
        switch (opcode) {
        case CXL_MBOX_OP_SANITIZE:
                set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
                break;
        case CXL_MBOX_OP_SECURE_ERASE:
                set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_GET_SECURITY_STATE:
                set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_SET_PASSPHRASE:
                set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_DISABLE_PASSPHRASE:
                set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_UNLOCK:
                set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
                break;
        case CXL_MBOX_OP_FREEZE_SECURITY:
                set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
                        security->enabled_cmds);
                break;
        case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
                set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
                        security->enabled_cmds);
                break;
        default:
                break;
        }
}

static bool cxl_is_poison_command(u16 opcode)
{
#define CXL_MBOX_OP_POISON_CMDS 0x43

        if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
                return true;

        return false;
}

static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
                                       u16 opcode)
{
        switch (opcode) {
        case CXL_MBOX_OP_GET_POISON:
                set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_INJECT_POISON:
                set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_CLEAR_POISON:
                set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
                set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_SCAN_MEDIA:
                set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
                break;
        case CXL_MBOX_OP_GET_SCAN_MEDIA:
                set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
                break;
        default:
                break;
        }
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
        struct cxl_mem_command *c;

        cxl_for_each_cmd(c)
                if (c->opcode == opcode)
                        return c;

        return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
        struct cxl_mem_command *c;

        c = cxl_mem_find_command(opcode);
        if (!c)
                return NULL;

        return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @mds: The driver data for the operation
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %0        - Success.
 *  * %-E2BIG   - Payload is too large for hardware.
 *  * %-EBUSY   - Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT  - Hardware error occurred.
 *  * %-ENXIO   - Command completed, but device reported an error.
 *  * %-EIO     - Unexpected output size.
 *
 * Mailbox commands may execute successfully yet the device itself reported an
 * error. While this distinction can be useful for commands from userspace, the
 * kernel will only be able to use results when both are successful.
 */
int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
                          struct cxl_mbox_cmd *mbox_cmd)
{
        size_t out_size, min_out;
        int rc;

        if (mbox_cmd->size_in > mds->payload_size ||
            mbox_cmd->size_out > mds->payload_size)
                return -E2BIG;

        out_size = mbox_cmd->size_out;
        min_out = mbox_cmd->min_out;
        rc = mds->mbox_send(mds, mbox_cmd);
        /*
         * EIO is reserved for a payload size mismatch and mbox_send()
         * may not return this error.
         */
        if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
                return -ENXIO;
        if (rc)
                return rc;

        if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
            mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
                return cxl_mbox_cmd_rc2errno(mbox_cmd);

        if (!out_size)
                return 0;

        /*
         * Variable sized output needs to at least satisfy the caller's
         * minimum if not the fully requested size.
         */
        if (min_out == 0)
                min_out = out_size;

        if (mbox_cmd->size_out < min_out)
                return -EIO;
        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
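
/*
 * Illustrative sketch (not part of the driver's flow): a typical internal
 * caller of cxl_internal_send_cmd() builds a &struct cxl_mbox_cmd on the
 * stack with a fixed-size output payload. GET_HEALTH_INFO and
 * struct cxl_mbox_health_info (from cxlmem.h) are used here only as an
 * example; any fixed-size command follows the same pattern.
 */
static int __maybe_unused example_get_health_info(struct cxl_memdev_state *mds)
{
        struct cxl_mbox_health_info info;
        struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
                .size_out = sizeof(info),
                .payload_out = &info,
        };

        /* 0 means both the transport and the device reported success */
        return cxl_internal_send_cmd(mds, &mbox_cmd);
}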

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
        int i;

        if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
                return false;

        if (security_locked_down(LOCKDOWN_PCI_ACCESS))
                return false;

        if (cxl_raw_allow_all)
                return true;

        if (cxl_is_security_command(opcode))
                return false;

        for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
                if (cxl_disabled_raw_commands[i] == opcode)
                        return false;

        return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true      - payload_in passes check for @opcode.
 *  * false     - payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
        switch (opcode) {
        case CXL_MBOX_OP_SET_PARTITION_INFO: {
                struct cxl_mbox_set_partition_info *pi = payload_in;

                if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
                        return false;
                break;
        }
        default:
                break;
        }
        return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
                             struct cxl_memdev_state *mds, u16 opcode,
                             size_t in_size, size_t out_size, u64 in_payload)
{
        *mbox = (struct cxl_mbox_cmd) {
                .opcode = opcode,
                .size_in = in_size,
        };

        if (in_size) {
                mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
                                                in_size);
                if (IS_ERR(mbox->payload_in))
                        return PTR_ERR(mbox->payload_in);

                if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
                        dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
                                cxl_mem_opcode_to_name(opcode));
                        kvfree(mbox->payload_in);
                        return -EBUSY;
                }
        }

        /* Prepare to handle a full payload for variable sized output */
        if (out_size == CXL_VARIABLE_PAYLOAD)
                mbox->size_out = mds->payload_size;
        else
                mbox->size_out = out_size;

        if (mbox->size_out) {
                mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
                if (!mbox->payload_out) {
                        kvfree(mbox->payload_in);
                        return -ENOMEM;
                }
        }
        return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
        kvfree(mbox->payload_in);
        kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
                              const struct cxl_send_command *send_cmd,
                              struct cxl_memdev_state *mds)
{
        if (send_cmd->raw.rsvd)
                return -EINVAL;

        /*
         * Unlike supported commands, the output size of RAW commands
         * gets passed along without further checking, so it must be
         * validated here.
         */
        if (send_cmd->out.size > mds->payload_size)
                return -EINVAL;

        if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
                return -EPERM;

        dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");

        *mem_cmd = (struct cxl_mem_command) {
                .info = {
                        .id = CXL_MEM_COMMAND_ID_RAW,
                        .size_in = send_cmd->in.size,
                        .size_out = send_cmd->out.size,
                },
                .opcode = send_cmd->raw.opcode
        };

        return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
                          const struct cxl_send_command *send_cmd,
                          struct cxl_memdev_state *mds)
{
        struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
        const struct cxl_command_info *info = &c->info;

        if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
                return -EINVAL;

        if (send_cmd->rsvd)
                return -EINVAL;

        if (send_cmd->in.rsvd || send_cmd->out.rsvd)
                return -EINVAL;

        /* Check that the command is enabled for hardware */
        if (!test_bit(info->id, mds->enabled_cmds))
                return -ENOTTY;

        /* Check that the command is not claimed for exclusive kernel use */
        if (test_bit(info->id, mds->exclusive_cmds))
                return -EBUSY;

        /* Check the input buffer is the expected size */
        if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
            (info->size_in != send_cmd->in.size))
                return -ENOMEM;

        /* Check the output buffer is at least large enough */
        if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
            (send_cmd->out.size < info->size_out))
                return -ENOMEM;

        *mem_cmd = (struct cxl_mem_command) {
                .info = {
                        .id = info->id,
                        .flags = info->flags,
                        .size_in = send_cmd->in.size,
                        .size_out = send_cmd->out.size,
                },
                .opcode = c->opcode
        };

        return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @mds: The driver data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0        - @mbox_cmd is ready to send.
 *  * %-ENOTTY  - Invalid command specified.
 *  * %-EINVAL  - Reserved fields or invalid values were used.
 *  * %-ENOMEM  - Input or output buffer wasn't sized properly.
 *  * %-EPERM   - Attempted to use a protected command.
 *  * %-EBUSY   - Kernel has claimed exclusive access to this opcode
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
                                      struct cxl_memdev_state *mds,
                                      const struct cxl_send_command *send_cmd)
{
        struct cxl_mem_command mem_cmd;
        int rc;

        if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
                return -ENOTTY;

        /*
         * The user can never specify an input payload larger than what hardware
         * supports, but output can be arbitrarily large (simply write out as
         * much data as the hardware provides).
         */
        if (send_cmd->in.size > mds->payload_size)
                return -EINVAL;

        /* Sanitize and construct a cxl_mem_command */
        if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
                rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
        else
                rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);

        if (rc)
                return rc;

        /* Sanitize and construct a cxl_mbox_cmd */
        return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
                                 mem_cmd.info.size_in, mem_cmd.info.size_out,
                                 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
                  struct cxl_mem_query_commands __user *q)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct device *dev = &cxlmd->dev;
        struct cxl_mem_command *cmd;
        u32 n_commands;
        int j = 0;

        dev_dbg(dev, "Query IOCTL\n");

        if (get_user(n_commands, &q->n_commands))
                return -EFAULT;

        /* Returns the total number of commands if 0 elements are requested. */
        if (n_commands == 0)
                return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

        /*
         * Otherwise, return min(n_commands, total commands) cxl_command_info
         * structures.
         */
        cxl_for_each_cmd(cmd) {
                struct cxl_command_info info = cmd->info;

                if (test_bit(info.id, mds->enabled_cmds))
                        info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
                if (test_bit(info.id, mds->exclusive_cmds))
                        info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;

                if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
                        return -EFAULT;

                if (j == n_commands)
                        break;
        }

        return 0;
}

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @mds: The driver data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0        - Mailbox transaction succeeded. This implies the mailbox
 *                protocol completed successfully, not that the operation itself
 *                was successful.
 *  * %-ENOMEM  - Couldn't allocate a bounce buffer.
 *  * %-EFAULT  - Something happened with copy_to/from_user.
 *  * %-EINTR   - Mailbox acquisition interrupted.
 *  * %-EXXX    - Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
                                        struct cxl_mbox_cmd *mbox_cmd,
                                        u64 out_payload, s32 *size_out,
                                        u32 *retval)
{
        struct device *dev = mds->cxlds.dev;
        int rc;

        dev_dbg(dev,
                "Submitting %s command for user\n"
                "\topcode: %x\n"
                "\tsize: %zx\n",
                cxl_mem_opcode_to_name(mbox_cmd->opcode),
                mbox_cmd->opcode, mbox_cmd->size_in);

        rc = mds->mbox_send(mds, mbox_cmd);
        if (rc)
                goto out;

        /*
         * @size_out contains the max size that's allowed to be written back out
         * to userspace. While the payload may have written more output than
         * this, it will have to be ignored.
         */
        if (mbox_cmd->size_out) {
                dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
                              "Invalid return size\n");
                if (copy_to_user(u64_to_user_ptr(out_payload),
                                 mbox_cmd->payload_out, mbox_cmd->size_out)) {
                        rc = -EFAULT;
                        goto out;
                }
        }

        *size_out = mbox_cmd->size_out;
        *retval = mbox_cmd->return_code;

out:
        cxl_mbox_cmd_dtor(mbox_cmd);
        return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct device *dev = &cxlmd->dev;
        struct cxl_send_command send;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        dev_dbg(dev, "Send IOCTL\n");

        if (copy_from_user(&send, s, sizeof(send)))
                return -EFAULT;

        rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
        if (rc)
                return rc;

        rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
                                          &send.out.size, &send.retval);
        if (rc)
                return rc;

        if (copy_to_user(s, &send, sizeof(send)))
                return -EFAULT;

        return 0;
}
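
/*
 * Illustrative userspace sketch (not kernel code) of exercising the
 * CXL_MEM_SEND_COMMAND ioctl serviced by cxl_send_cmd() above. The ABI
 * lives in include/uapi/linux/cxl_mem.h; the device node path is an
 * assumption for the example, and error handling is elided.
 *
 *	struct cxl_send_command send = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = sizeof(id_buf),
 *		.out.payload = (__u64)(uintptr_t)id_buf,
 *	};
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, CXL_MEM_SEND_COMMAND, &send) == 0)
 *		printf("retval: %u, bytes: %d\n", send.retval, send.out.size);
 */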

static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
                        u32 *size, u8 *out)
{
        u32 remaining = *size;
        u32 offset = 0;

        while (remaining) {
                u32 xfer_size = min_t(u32, remaining, mds->payload_size);
                struct cxl_mbox_cmd mbox_cmd;
                struct cxl_mbox_get_log log;
                int rc;

                log = (struct cxl_mbox_get_log) {
                        .uuid = *uuid,
                        .offset = cpu_to_le32(offset),
                        .length = cpu_to_le32(xfer_size),
                };

                mbox_cmd = (struct cxl_mbox_cmd) {
                        .opcode = CXL_MBOX_OP_GET_LOG,
                        .size_in = sizeof(log),
                        .payload_in = &log,
                        .size_out = xfer_size,
                        .payload_out = out,
                };

                rc = cxl_internal_send_cmd(mds, &mbox_cmd);

                /*
                 * The output payload length that indicates the number
                 * of valid bytes can be smaller than the Log buffer
                 * size.
                 */
                if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
                        offset += mbox_cmd.size_out;
                        break;
                }

                if (rc < 0)
                        return rc;

                out += xfer_size;
                remaining -= xfer_size;
                offset += xfer_size;
        }

        *size = offset;

        return 0;
}
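
/*
 * Worked example of the chunking above: with a 1 MiB mailbox payload and
 * a 2.5 MiB log, cxl_xfer_log() issues three GET_LOG transactions of
 * 1 MiB at offset 0, 1 MiB at offset 1 MiB, and 0.5 MiB at offset 2 MiB.
 * A short read on any transfer simply truncates the reported size.
 */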

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @mds: The driver data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
        struct cxl_cel_entry *cel_entry;
        const int cel_entries = size / sizeof(*cel_entry);
        struct device *dev = mds->cxlds.dev;
        int i;

        cel_entry = (struct cxl_cel_entry *) cel;

        for (i = 0; i < cel_entries; i++) {
                u16 opcode = le16_to_cpu(cel_entry[i].opcode);
                struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
                int enabled = 0;

                if (cmd) {
                        set_bit(cmd->info.id, mds->enabled_cmds);
                        enabled++;
                }

                if (cxl_is_poison_command(opcode)) {
                        cxl_set_poison_cmd_enabled(&mds->poison, opcode);
                        enabled++;
                }

                if (cxl_is_security_command(opcode)) {
                        cxl_set_security_cmd_enabled(&mds->security, opcode);
                        enabled++;
                }

                dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
                        enabled ? "enabled" : "unsupported by driver");
        }
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
        struct cxl_mbox_get_supported_logs *ret;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        ret = kvmalloc(mds->payload_size, GFP_KERNEL);
        if (!ret)
                return ERR_PTR(-ENOMEM);

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
                .size_out = mds->payload_size,
                .payload_out = ret,
                /* At least the record number field must be valid */
                .min_out = 2,
        };
        rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0) {
                kvfree(ret);
                return ERR_PTR(rc);
        }

        return ret;
}

enum {
        CEL_UUID,
        VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
        [CEL_UUID] = DEFINE_CXL_CEL_UUID,
        [VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @mds: The driver data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @mds.
 */
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
        struct cxl_mbox_get_supported_logs *gsl;
        struct device *dev = mds->cxlds.dev;
        struct cxl_mem_command *cmd;
        int i, rc;

        gsl = cxl_get_gsl(mds);
        if (IS_ERR(gsl))
                return PTR_ERR(gsl);

        rc = -ENOENT;
        for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
                u32 size = le32_to_cpu(gsl->entry[i].size);
                uuid_t uuid = gsl->entry[i].uuid;
                u8 *log;

                dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

                if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
                        continue;

                log = kvmalloc(size, GFP_KERNEL);
                if (!log) {
                        rc = -ENOMEM;
                        goto out;
                }

                rc = cxl_xfer_log(mds, &uuid, &size, log);
                if (rc) {
                        kvfree(log);
                        goto out;
                }

                cxl_walk_cel(mds, size, log);
                kvfree(log);

                /* In case CEL was bogus, enable some default commands. */
                cxl_for_each_cmd(cmd)
                        if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
                                set_bit(cmd->info.id, mds->enabled_cmds);

                /* Found the required CEL */
                rc = 0;
        }
out:
        kvfree(gsl);
        return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
                            enum cxl_event_log_type type,
                            enum cxl_event_type event_type,
                            const uuid_t *uuid, union cxl_event *evt)
{
        if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
                trace_cxl_general_media(cxlmd, type, &evt->gen_media);
        else if (event_type == CXL_CPER_EVENT_DRAM)
                trace_cxl_dram(cxlmd, type, &evt->dram);
        else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
                trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
        else
                trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);

static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
                                     enum cxl_event_log_type type,
                                     struct cxl_event_record_raw *record)
{
        enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
        const uuid_t *uuid = &record->id;

        if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
                ev_type = CXL_CPER_EVENT_GEN_MEDIA;
        else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
                ev_type = CXL_CPER_EVENT_DRAM;
        else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
                ev_type = CXL_CPER_EVENT_MEM_MODULE;

        cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}

static int cxl_clear_event_record(struct cxl_memdev_state *mds,
                                  enum cxl_event_log_type log,
                                  struct cxl_get_event_payload *get_pl)
{
        struct cxl_mbox_clear_event_payload *payload;
        u16 total = le16_to_cpu(get_pl->record_count);
        u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
        size_t pl_size = struct_size(payload, handles, max_handles);
        struct cxl_mbox_cmd mbox_cmd;
        u16 cnt;
        int rc = 0;
        int i;

        /* Payload size may limit the max handles */
        if (pl_size > mds->payload_size) {
                max_handles = (mds->payload_size - sizeof(*payload)) /
                              sizeof(__le16);
                pl_size = struct_size(payload, handles, max_handles);
        }

        payload = kvzalloc(pl_size, GFP_KERNEL);
        if (!payload)
                return -ENOMEM;

        *payload = (struct cxl_mbox_clear_event_payload) {
                .event_log = log,
        };

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
                .payload_in = payload,
                .size_in = pl_size,
        };

        /*
         * Clear Event Records uses u8 for the handle cnt while Get Event
         * Record can return up to 0xffff records.
         */
        i = 0;
        for (cnt = 0; cnt < total; cnt++) {
                struct cxl_event_record_raw *raw = &get_pl->records[cnt];
                struct cxl_event_generic *gen = &raw->event.generic;

                payload->handles[i++] = gen->hdr.handle;
                dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
                        le16_to_cpu(payload->handles[i - 1]));

                if (i == max_handles) {
                        payload->nr_recs = i;
                        rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                        if (rc)
                                goto free_pl;
                        i = 0;
                }
        }

        /* Clear what is left if any */
        if (i) {
                payload->nr_recs = i;
                mbox_cmd.size_in = struct_size(payload, handles, i);
                rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                if (rc)
                        goto free_pl;
        }

free_pl:
        kvfree(payload);
        return rc;
}
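
/*
 * Worked example of the clamp above, assuming the 6 byte fixed header of
 * struct cxl_mbox_clear_event_payload in cxlmem.h: with a 256 byte mailbox
 * payload, max_handles = (256 - 6) / 2 = 125 handles per Clear Event
 * Records transaction.
 */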

static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
                                    enum cxl_event_log_type type)
{
        struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
        struct device *dev = mds->cxlds.dev;
        struct cxl_get_event_payload *payload;
        struct cxl_mbox_cmd mbox_cmd;
        u8 log_type = type;
        u16 nr_rec;

        mutex_lock(&mds->event.log_lock);
        payload = mds->event.buf;

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
                .payload_in = &log_type,
                .size_in = sizeof(log_type),
                .payload_out = payload,
                .size_out = mds->payload_size,
                .min_out = struct_size(payload, records, 0),
        };

        do {
                int rc, i;

                rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                if (rc) {
                        dev_err_ratelimited(dev,
                                "Event log '%d': Failed to query event records : %d",
                                type, rc);
                        break;
                }

                nr_rec = le16_to_cpu(payload->record_count);
                if (!nr_rec)
                        break;

                for (i = 0; i < nr_rec; i++)
                        __cxl_event_trace_record(cxlmd, type,
                                                 &payload->records[i]);

                if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
                        trace_cxl_overflow(cxlmd, type, payload);

                rc = cxl_clear_event_record(mds, type, payload);
                if (rc) {
                        dev_err_ratelimited(dev,
                                "Event log '%d': Failed to clear events : %d",
                                type, rc);
                        break;
                }
        } while (nr_rec);

        mutex_unlock(&mds->event.log_lock);
}

/**
 * cxl_mem_get_event_records - Get Event Records from the device
 * @mds: The driver data for the operation
 * @status: Event Status register value identifying which events are available.
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
{
        dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);

        if (status & CXLDEV_EVENT_STATUS_FATAL)
                cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
        if (status & CXLDEV_EVENT_STATUS_FAIL)
                cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
        if (status & CXLDEV_EVENT_STATUS_WARN)
                cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
        if (status & CXLDEV_EVENT_STATUS_INFO)
                cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);

/**
 * cxl_mem_get_partition_info - Get partition info
 * @mds: The driver data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
        struct cxl_mbox_get_partition_info pi;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
                .size_out = sizeof(pi),
                .payload_out = &pi,
        };
        rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc)
                return rc;

        mds->active_volatile_bytes =
                le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
        mds->active_persistent_bytes =
                le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
        mds->next_volatile_bytes =
                le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
        mds->next_persistent_bytes =
                le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

        return 0;
}

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @mds: The driver data for the operation
 *
 * Return: 0 if identify was executed successfully or media not ready.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
        /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
        struct cxl_mbox_identify id;
        struct cxl_mbox_cmd mbox_cmd;
        u32 val;
        int rc;

        if (!mds->cxlds.media_ready)
                return 0;

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_IDENTIFY,
                .size_out = sizeof(id),
                .payload_out = &id,
        };
        rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0)
                return rc;

        mds->total_bytes =
                le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
        mds->volatile_only_bytes =
                le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
        mds->persistent_only_bytes =
                le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
        mds->partition_align_bytes =
                le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

        mds->lsa_size = le32_to_cpu(id.lsa_size);
        memcpy(mds->firmware_version, id.fw_revision,
               sizeof(id.fw_revision));

        if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
                val = get_unaligned_le24(id.poison_list_max_mer);
                mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
        }

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);

static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
        int rc;
        u32 sec_out = 0;
        struct cxl_get_security_output {
                __le32 flags;
        } out;
        struct cxl_mbox_cmd sec_cmd = {
                .opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
                .payload_out = &out,
                .size_out = sizeof(out),
        };
        struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
        struct cxl_dev_state *cxlds = &mds->cxlds;

        if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
                return -EINVAL;

        rc = cxl_internal_send_cmd(mds, &sec_cmd);
        if (rc < 0) {
                dev_err(cxlds->dev, "Failed to get security state : %d", rc);
                return rc;
        }

        /*
         * Prior to using these commands, any security applied to
         * the user data areas of the device shall be DISABLED (or
         * UNLOCKED for the secure erase case).
         */
        sec_out = le32_to_cpu(out.flags);
        if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
                return -EINVAL;

        if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
            sec_out & CXL_PMEM_SEC_STATE_LOCKED)
                return -EINVAL;

        rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        if (rc < 0) {
                dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
                return rc;
        }

        return 0;
}

/**
 * cxl_mem_sanitize() - Send a sanitization command to the device.
 * @cxlmd: The device for the operation
 * @cmd: The specific sanitization command opcode
 *
 * Return: 0 if the command was executed successfully, regardless of
 * whether or not the actual security operation is done in the background,
 * such as for the Sanitize case.
 * Error return values can be the result of the mailbox command, -EINVAL
 * when security requirements are not met or the context is invalid, or
 * -EBUSY if the sanitize operation is already in flight.
 *
 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
 */
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_port *endpoint;
        int rc;

        /* synchronize with cxl_mem_probe() and decoder write operations */
        device_lock(&cxlmd->dev);
        endpoint = cxlmd->endpoint;
        down_read(&cxl_region_rwsem);
        /*
         * Require an endpoint to be safe, otherwise the driver cannot
         * be sure that the device is unmapped.
         */
        if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
                rc = __cxl_mem_sanitize(mds, cmd);
        else
                rc = -EBUSY;
        up_read(&cxl_region_rwsem);
        device_unlock(&cxlmd->dev);

        return rc;
}

static int add_dpa_res(struct device *dev, struct resource *parent,
                       struct resource *res, resource_size_t start,
                       resource_size_t size, const char *type)
{
        int rc;

        res->name = type;
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM;
        if (resource_size(res) == 0) {
                dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
                return 0;
        }
        rc = request_resource(parent, res);
        if (rc) {
                dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
                        res, rc);
                return rc;
        }

        dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

        return 0;
}

int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
{
        struct cxl_dev_state *cxlds = &mds->cxlds;
        struct device *dev = cxlds->dev;
        int rc;

        if (!cxlds->media_ready) {
                cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
                cxlds->ram_res = DEFINE_RES_MEM(0, 0);
                cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
                return 0;
        }

        cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);

        if (mds->partition_align_bytes == 0) {
                rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
                                 mds->volatile_only_bytes, "ram");
                if (rc)
                        return rc;
                return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
                                   mds->volatile_only_bytes,
                                   mds->persistent_only_bytes, "pmem");
        }

        rc = cxl_mem_get_partition_info(mds);
        if (rc) {
                dev_err(dev, "Failed to query partition information\n");
                return rc;
        }

        rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
                         mds->active_volatile_bytes, "ram");
        if (rc)
                return rc;
        return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
                           mds->active_volatile_bytes,
                           mds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
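
/*
 * Illustrative result (made-up sizes): a device with 512 MiB of volatile
 * and 512 MiB of persistent capacity would end up with a DPA resource
 * tree like:
 *
 *	dpa_res:    [mem 0x00000000-0x3fffffff]
 *	  ram_res:  [mem 0x00000000-0x1fffffff]
 *	  pmem_res: [mem 0x20000000-0x3fffffff]
 */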

int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
        struct cxl_mbox_cmd mbox_cmd;
        struct cxl_mbox_set_timestamp_in pi;
        int rc;

        pi.timestamp = cpu_to_le64(ktime_get_real_ns());
        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_SET_TIMESTAMP,
                .size_in = sizeof(pi),
                .payload_in = &pi,
        };

        rc = cxl_internal_send_cmd(mds, &mbox_cmd);
        /*
         * Command is optional. Devices may have another way of providing
         * a timestamp, or may return all 0s in timestamp fields.
         * Don't report an error if this command isn't supported.
         */
        if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
                return rc;

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);

int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
                       struct cxl_region *cxlr)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mbox_poison_out *po;
        struct cxl_mbox_poison_in pi;
        struct cxl_mbox_cmd mbox_cmd;
        int nr_records = 0;
        int rc;

        rc = mutex_lock_interruptible(&mds->poison.lock);
        if (rc)
                return rc;

        po = mds->poison.list_out;
        pi.offset = cpu_to_le64(offset);
        pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_POISON,
                .size_in = sizeof(pi),
                .payload_in = &pi,
                .size_out = mds->payload_size,
                .payload_out = po,
                .min_out = struct_size(po, record, 0),
        };

        do {
                rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                if (rc)
                        break;

                for (int i = 0; i < le16_to_cpu(po->count); i++)
                        trace_cxl_poison(cxlmd, cxlr, &po->record[i],
                                         po->flags, po->overflow_ts,
                                         CXL_POISON_TRACE_LIST);

                /* Protect against an uncleared _FLAG_MORE */
                nr_records = nr_records + le16_to_cpu(po->count);
                if (nr_records >= mds->poison.max_errors) {
                        dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
                                nr_records);
                        break;
                }
        } while (po->flags & CXL_POISON_FLAG_MORE);

        mutex_unlock(&mds->poison.lock);
        return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);
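
/*
 * Note on units (assuming CXL_POISON_LEN_MULT reflects the spec's 64 byte
 * poison granularity): a caller asking about a 4 KiB range passes len =
 * 4096, which is encoded as a Get Poison List length of 4096 / 64 = 64
 * units.
 */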

static void free_poison_buf(void *buf)
{
        kvfree(buf);
}

/* Get Poison List output buffer is protected by mds->poison.lock */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
        mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL);
        if (!mds->poison.list_out)
                return -ENOMEM;

        return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
                                        mds->poison.list_out);
}

int cxl_poison_state_init(struct cxl_memdev_state *mds)
{
        int rc;

        if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
                return 0;

        rc = cxl_poison_alloc_buf(mds);
        if (rc) {
                clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
                return rc;
        }

        mutex_init(&mds->poison.lock);
        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);

struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
        struct cxl_memdev_state *mds;

        mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
        if (!mds) {
                dev_err(dev, "No memory available\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_init(&mds->mbox_mutex);
        mutex_init(&mds->event.log_lock);
        mds->cxlds.dev = dev;
        mds->cxlds.reg_map.host = dev;
        mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
        mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
        INIT_LIST_HEAD(&mds->ram_perf_list);
        INIT_LIST_HEAD(&mds->pmem_perf_list);

        return mds;
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);

void __init cxl_mbox_init(void)
{
        struct dentry *mbox_debugfs;

        mbox_debugfs = cxl_debugfs_create_dir("mbox");
        debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
                            &cxl_raw_allow_all);
}
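
/*
 * Usage note (assuming debugfs is mounted at /sys/kernel/debug): with
 * CONFIG_CXL_MEM_RAW_COMMANDS enabled, the knob created above appears as
 *
 *	/sys/kernel/debug/cxl/mbox/raw_allow_all
 *
 * and writing 'Y' to it bypasses the cxl_disabled_raw_commands[] deny-list
 * for RAW submissions, though not the kernel lockdown check in
 * cxl_mem_raw_command_allowed().
 */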