/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef AMDGPU_XCP_H
#define AMDGPU_XCP_H

#include <linux/pci.h>
#include <linux/xarray.h>

#include "amdgpu_ctx.h"

#define MAX_XCP 8

#define AMDGPU_XCP_MODE_NONE -1
#define AMDGPU_XCP_MODE_TRANS -2

#define AMDGPU_XCP_FL_NONE 0
#define AMDGPU_XCP_FL_LOCKED (1 << 0)

struct amdgpu_fpriv;

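/*
 * AMDGPU_XCP_MODE_NONE means no partition mode has been configured yet, and
 * AMDGPU_XCP_MODE_TRANS marks a manager that is in the middle of switching
 * modes.  AMDGPU_XCP_FL_LOCKED is meant for callers of
 * amdgpu_xcp_query_partition_mode() that already hold xcp_lock (a reading of
 * the flag and lock names, not wording from the driver).
 */
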
enum AMDGPU_XCP_IP_BLOCK {
	AMDGPU_XCP_GFXHUB,
	AMDGPU_XCP_GFX,
	AMDGPU_XCP_SDMA,
	AMDGPU_XCP_VCN,
	AMDGPU_XCP_MAX_BLOCKS
};

enum AMDGPU_XCP_STATE {
	AMDGPU_XCP_PREPARE_SUSPEND,
	AMDGPU_XCP_SUSPEND,
	AMDGPU_XCP_PREPARE_RESUME,
	AMDGPU_XCP_RESUME,
};

struct amdgpu_xcp_ip_funcs {
	/* Xcp states are incremented from PREPARE_SUSPEND to RESUME */
	int (*prepare_suspend)(void *handle, uint32_t inst_mask);
	int (*suspend)(void *handle, uint32_t inst_mask);
	int (*prepare_resume)(void *handle, uint32_t inst_mask);
	int (*resume)(void *handle, uint32_t inst_mask);
};

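/*
 * Per-IP code provides these callbacks so the XCP core can walk every IP
 * block of a partition through the states above.  A minimal sketch with
 * hypothetical names:
 *
 *	static int foo_xcp_suspend(void *handle, uint32_t inst_mask)
 *	{
 *		return foo_stop_instances(handle, inst_mask);
 *	}
 *
 *	static struct amdgpu_xcp_ip_funcs foo_xcp_ip_funcs = {
 *		.suspend = foo_xcp_suspend,
 *	};
 */
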
struct amdgpu_xcp_ip {
	struct amdgpu_xcp_ip_funcs *ip_funcs;
	uint32_t inst_mask;

	enum AMDGPU_XCP_IP_BLOCK ip_id;
	bool valid;
};

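/*
 * An XCP groups the per-IP state above with the DRM plumbing and per-HW-IP
 * scheduler lists through which the partition is exposed as its own device.
 */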
struct amdgpu_xcp {
	struct amdgpu_xcp_ip ip[AMDGPU_XCP_MAX_BLOCKS];

	uint8_t id;
	uint8_t mem_id;
	bool valid;
	atomic_t ref_cnt;
	struct drm_device *ddev;
	struct drm_device *rdev;
	struct drm_device *pdev;
	struct drm_driver *driver;
	struct drm_vma_offset_manager *vma_offset_manager;
	struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
};

struct amdgpu_xcp_mgr {
	struct amdgpu_device *adev;
	struct mutex xcp_lock;
	struct amdgpu_xcp_mgr_funcs *funcs;

	struct amdgpu_xcp xcp[MAX_XCP];
	uint8_t num_xcps;
	int8_t mode;

	/* Used to determine KFD memory size limits per XCP */
	unsigned int num_xcp_per_mem_partition;
};

struct amdgpu_xcp_mgr_funcs {
	int (*switch_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr, int mode,
				     int *num_xcps);
	int (*query_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr);
	int (*get_ip_details)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
			      enum AMDGPU_XCP_IP_BLOCK ip_id,
			      struct amdgpu_xcp_ip *ip);
	int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr,
			      struct amdgpu_xcp *xcp, uint8_t *mem_id);

	int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
	int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
	int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
	int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
	int (*select_scheds)(struct amdgpu_device *adev,
			     u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv,
			     unsigned int *num_scheds,
			     struct drm_gpu_scheduler ***scheds);
	int (*update_partition_sched_list)(struct amdgpu_device *adev);
};

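/*
 * ASIC-specific code fills in one of these tables and hands it to
 * amdgpu_xcp_mgr_init().  A minimal sketch with hypothetical names:
 *
 *	static struct amdgpu_xcp_mgr_funcs foo_xcp_funcs = {
 *		.query_partition_mode = foo_query_partition_mode,
 *		.switch_partition_mode = foo_switch_partition_mode,
 *		.get_ip_details = foo_get_ip_details,
 *	};
 */
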
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);

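/*
 * Partition suspend/resume is a two-step handshake that mirrors the
 * AMDGPU_XCP_STATE progression above.  An illustrative caller (not the
 * driver's exact call site):
 *
 *	r = amdgpu_xcp_prepare_suspend(adev->xcp_mgr, xcp_id);
 *	if (!r)
 *		r = amdgpu_xcp_suspend(adev->xcp_mgr, xcp_id);
 *
 * with amdgpu_xcp_prepare_resume()/amdgpu_xcp_resume() following the same
 * pattern on the way back up.
 */
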
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs);
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode);
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode);
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance);

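/*
 * Lifecycle sketch, reusing the hypothetical foo_xcp_funcs table from the
 * example above; the init_mode/init_xcps values are placeholders chosen per
 * ASIC, not fixed by this header:
 *
 *	r = amdgpu_xcp_mgr_init(adev, AMDGPU_XCP_MODE_NONE, 1, &foo_xcp_funcs);
 *	...
 *	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, AMDGPU_XCP_FL_NONE);
 *	if (mode >= 0 && mode != new_mode)
 *		r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, new_mode);
 */
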
int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask);

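/*
 * Illustrative lookup of which partition owns a given IP instance and of the
 * instance mask that partition covers (AMDGPU_XCP_SDMA is just one example
 * block):
 *
 *	xcp_id = amdgpu_xcp_get_partition(adev->xcp_mgr, AMDGPU_XCP_SDMA, inst);
 *	if (xcp_id >= 0)
 *		r = amdgpu_xcp_get_inst_details(&adev->xcp_mgr->xcp[xcp_id],
 *						AMDGPU_XCP_SDMA, &inst_mask);
 */
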
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			    const struct pci_device_id *ent);
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev);
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv);
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity);

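/*
 * The device helpers above appear to manage the per-partition DRM devices
 * (note the drm_device pointers in struct amdgpu_xcp): amdgpu_xcp_dev_register()
 * sets them up, amdgpu_xcp_dev_unplug() tears them down, and
 * amdgpu_xcp_open_device() associates an opening client (fpriv) with its
 * partition.
 */
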
#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
	((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
	(adev)->xcp_mgr->funcs->select_scheds ? \
	(adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT)
#define amdgpu_xcp_update_partition_sched_list(adev) \
	((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
	(adev)->xcp_mgr->funcs->update_partition_sched_list ? \
	(adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)

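/*
 * The wrapper macros above are safe on devices without a partition manager.
 * An illustrative caller handling the -ENOENT fallback:
 *
 *	r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
 *				     &num_scheds, &scheds);
 *	if (r == -ENOENT)
 *		... fall back to the default scheduler list ...
 */
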
static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr)
		return 1;

	return xcp_mgr->num_xcps;
}

static inline struct amdgpu_xcp *
amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from)
{
	if (!xcp_mgr)
		return NULL;

	while (*from < MAX_XCP) {
		if (xcp_mgr->xcp[*from].valid)
			return &xcp_mgr->xcp[*from];
		*from += 1;
	}

	return NULL;
}

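/*
 * amdgpu_get_next_xcp() skips invalid slots, so the for_each_xcp() iterator
 * below only visits populated partitions.  An illustrative walk:
 *
 *	struct amdgpu_xcp *xcp;
 *	int i;
 *
 *	for_each_xcp(adev->xcp_mgr, xcp, i)
 *		dev_info(adev->dev, "partition %d is valid\n", xcp->id);
 */
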
#define for_each_xcp(xcp_mgr, xcp, i) \
	for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \
	     ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i))

#endif