drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

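/*
 * Build the device instance mask owned by partition 'xcp_id' when each
 * partition gets 'num_inst' consecutive instances, e.g. num_inst = 2 and
 * xcp_id = 3 selects instances 6 and 7 (mask 0xC0).
 */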
#define XCP_INST_MASK(num_inst, xcp_id)                                        \
        (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD      (1 << 0)

void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
        int i;

        adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

        adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

        adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
        adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
        adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

        adev->doorbell_index.sdma_doorbell_range = 20;
        for (i = 0; i < adev->sdma.num_instances; i++)
                adev->doorbell_index.sdma_engine[i] =
                        AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
                        i * (adev->doorbell_index.sdma_doorbell_range >> 1);

        adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
        adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

        adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
        adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

        adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

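/*
 * Assign a ring to the partition (XCP) that owns the hardware instance
 * backing it. The ring stays on AMDGPU_XCP_NO_PARTITION when partitioning
 * is disabled or the ring type is not handled here.
 */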
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
                             uint32_t inst_idx, struct amdgpu_ring *ring)
{
        int xcp_id;
        enum AMDGPU_XCP_IP_BLOCK ip_blk;
        uint32_t inst_mask;

        ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
        if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return;

        inst_mask = 1 << inst_idx;

        switch (ring->funcs->type) {
        case AMDGPU_HW_IP_GFX:
        case AMDGPU_RING_TYPE_COMPUTE:
        case AMDGPU_RING_TYPE_KIQ:
                ip_blk = AMDGPU_XCP_GFX;
                break;
        case AMDGPU_RING_TYPE_SDMA:
                ip_blk = AMDGPU_XCP_SDMA;
                break;
        case AMDGPU_RING_TYPE_VCN_ENC:
        case AMDGPU_RING_TYPE_VCN_JPEG:
                ip_blk = AMDGPU_XCP_VCN;
                if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
                        inst_mask = 1 << (inst_idx * 2);
                break;
        default:
                DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
                return;
        }

        for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
                if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
                        ring->xcp_id = xcp_id;
                        break;
                }
        }
}

static void aqua_vanjaram_xcp_gpu_sched_update(
                struct amdgpu_device *adev,
                struct amdgpu_ring *ring,
                unsigned int sel_xcp_id)
{
        unsigned int *num_gpu_sched;

        num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
                        .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
        adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
                        .sched[(*num_gpu_sched)++] = &ring->sched;
        DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
                        sel_xcp_id, ring->funcs->type,
                        ring->hw_prio, *num_gpu_sched);
}

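/*
 * Rebuild the per-partition scheduler lists from scratch: clear every XCP's
 * reference count and scheduler array, then register each ready ring with
 * the partition it was assigned to.
 */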
static int aqua_vanjaram_xcp_sched_list_update(
                struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int i;

        for (i = 0; i < MAX_XCP; i++) {
                atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
                memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
        }

        if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return 0;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                ring = adev->rings[i];
                if (!ring || !ring->sched.ready || ring->no_scheduler)
                        continue;

                aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

                /* VCN is shared by two partitions under CPX MODE */
                if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
                        ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
                        adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
                        aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
        }

        return 0;
}

static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->num_rings; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
                        ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
                        aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
                else
                        aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
        }

        return aqua_vanjaram_xcp_sched_list_update(adev);
}

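/*
 * Pick the schedulers for a new job: a file that has no partition yet is
 * placed on the least-referenced XCP, then that partition's scheduler list
 * for the requested IP type and priority is returned and its refcount bumped.
 */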
static int aqua_vanjaram_select_scheds(
                struct amdgpu_device *adev,
                u32 hw_ip,
                u32 hw_prio,
                struct amdgpu_fpriv *fpriv,
                unsigned int *num_scheds,
                struct drm_gpu_scheduler ***scheds)
{
        u32 sel_xcp_id;
        int i;

        if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
                u32 least_ref_cnt = ~0;

                fpriv->xcp_id = 0;
                for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
                        u32 total_ref_cnt;

                        total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
                        if (total_ref_cnt < least_ref_cnt) {
                                fpriv->xcp_id = i;
                                least_ref_cnt = total_ref_cnt;
                        }
                }
        }
        sel_xcp_id = fpriv->xcp_id;

        if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
                *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
                *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
                atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
                DRM_DEBUG("Selected partition #%d", sel_xcp_id);
        } else {
                DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
                return -ENOENT;
        }

        return 0;
}

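/*
 * Translate a logical IP instance number into the physical (device) instance
 * number. Only GC, SDMA and VCN/JPEG need the lookup table; every other IP
 * block is mapped one-to-one.
 */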
static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
                                         enum amd_hw_ip_block_type block,
                                         int8_t inst)
{
        int8_t dev_inst;

        switch (block) {
        case GC_HWIP:
        case SDMA0_HWIP:
        /* Covers both JPEG and VCN, as JPEG is only an alias of VCN */
        case VCN_HWIP:
                dev_inst = adev->ip_map.dev_inst[block][inst];
                break;
        default:
                /* For the rest of the IPs, no lookup is required.
                 * Assume 'logical instance == physical instance' for all configs. */
                dev_inst = inst;
                break;
        }

        return dev_inst;
}

static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
                                         enum amd_hw_ip_block_type block,
                                         uint32_t mask)
{
        uint32_t dev_mask = 0;
        int8_t log_inst, dev_inst;

        while (mask) {
                log_inst = ffs(mask) - 1;
                dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
                dev_mask |= (1 << dev_inst);
                mask &= ~(1 << log_inst);
        }

        return dev_mask;
}

static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
                                          enum amd_hw_ip_block_type ip_block,
                                          uint32_t inst_mask)
{
        int l = 0, i;

        while (inst_mask) {
                i = ffs(inst_mask) - 1;
                adev->ip_map.dev_inst[ip_block][l++] = i;
                inst_mask &= ~(1 << i);
        }
        for (; l < HWIP_MAX_INSTANCE; l++)
                adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
        u32 ip_map[][2] = {
                { GC_HWIP, adev->gfx.xcc_mask },
                { SDMA0_HWIP, adev->sdma.sdma_mask },
                { VCN_HWIP, adev->vcn.inst_mask },
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
                aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

        adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
        adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for SMN addressing on different AIDs:
 *   bit[34]: indicates cross-AID access
 *   bit[33:32]: indicates the target AID id
 * The AID id range is 0 ~ 3, as the maximum number of AIDs is 4.
 */
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
        u64 ext_offset;

        /* local routing; bit[34:32] will be zeros */
        if (ext_id == 0)
                return 0;

        /* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
        ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

        return ext_offset;
}

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
        enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
        struct amdgpu_device *adev = xcp_mgr->adev;

        if (adev->nbio.funcs->get_compute_partition_mode)
                mode = adev->nbio.funcs->get_compute_partition_mode(adev);

        return mode;
}

static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
        int num_xcc, num_xcc_per_xcp = 0;

        num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

        switch (mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc;
                break;
        case AMDGPU_DPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc / 2;
                break;
        case AMDGPU_TPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc / 3;
                break;
        case AMDGPU_QPX_PARTITION_MODE:
                num_xcc_per_xcp = num_xcc / 4;
                break;
        case AMDGPU_CPX_PARTITION_MODE:
                num_xcc_per_xcp = 1;
                break;
        }

        return num_xcc_per_xcp;
}

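/*
 * Report which instances of an IP block belong to partition 'xcp_id' in the
 * current mode: the per-partition instance counts are derived from the total
 * SDMA/VCN/XCC counts, then turned into a mask with XCP_INST_MASK().
 */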
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                    enum AMDGPU_XCP_IP_BLOCK ip_id,
                                    struct amdgpu_xcp_ip *ip)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
        int num_sdma, num_vcn;

        num_sdma = adev->sdma.num_instances;
        num_vcn = adev->vcn.num_vcn_inst;

        switch (xcp_mgr->mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                num_sdma_xcp = num_sdma;
                num_vcn_xcp = num_vcn;
                break;
        case AMDGPU_DPX_PARTITION_MODE:
                num_sdma_xcp = num_sdma / 2;
                num_vcn_xcp = num_vcn / 2;
                break;
        case AMDGPU_TPX_PARTITION_MODE:
                num_sdma_xcp = num_sdma / 3;
                num_vcn_xcp = num_vcn / 3;
                break;
        case AMDGPU_QPX_PARTITION_MODE:
                num_sdma_xcp = num_sdma / 4;
                num_vcn_xcp = num_vcn / 4;
                break;
        case AMDGPU_CPX_PARTITION_MODE:
                num_sdma_xcp = 2;
                num_vcn_xcp = num_vcn ? 1 : 0;
                break;
        default:
                return -EINVAL;
        }

        num_xcc_xcp = adev->gfx.num_xcc_per_xcp;

        switch (ip_id) {
        case AMDGPU_XCP_GFXHUB:
                ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
                ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
                break;
        case AMDGPU_XCP_GFX:
                ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
                ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
                break;
        case AMDGPU_XCP_SDMA:
                ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
                ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
                break;
        case AMDGPU_XCP_VCN:
                ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
                /* TODO : Assign IP funcs */
                break;
        default:
                return -EINVAL;
        }

        ip->ip_id = ip_id;

        return 0;
}

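/*
 * Pick the compute partition mode that matches the current number of memory
 * partitions, e.g. a single memory partition selects SPX.
 */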
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_xcc;

        num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

        if (adev->gmc.num_mem_partitions == 1)
                return AMDGPU_SPX_PARTITION_MODE;

        if (adev->gmc.num_mem_partitions == num_xcc)
                return AMDGPU_CPX_PARTITION_MODE;

        if (adev->gmc.num_mem_partitions == num_xcc / 2)
                return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
                                                    AMDGPU_QPX_PARTITION_MODE;

        if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
                return AMDGPU_DPX_PARTITION_MODE;

        return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
                                          enum amdgpu_gfx_partition mode)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        int num_xcc, num_xccs_per_xcp;

        num_xcc = NUM_XCC(adev->gfx.xcc_mask);
        switch (mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
        case AMDGPU_DPX_PARTITION_MODE:
                return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
        case AMDGPU_TPX_PARTITION_MODE:
                return (adev->gmc.num_mem_partitions == 1 ||
                        adev->gmc.num_mem_partitions == 3) &&
                       ((num_xcc % 3) == 0);
        case AMDGPU_QPX_PARTITION_MODE:
                num_xccs_per_xcp = num_xcc / 4;
                return (adev->gmc.num_mem_partitions == 1 ||
                        adev->gmc.num_mem_partitions == 4) &&
                       (num_xccs_per_xcp >= 2);
        case AMDGPU_CPX_PARTITION_MODE:
                return ((num_xcc > 1) &&
                       (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
                       (num_xcc % adev->gmc.num_mem_partitions) == 0);
        default:
                return false;
        }

        return false;
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
        /* TODO:
         * Stop user queues and threads, and make sure GPU is empty of work.
         */

        if (flags & AMDGPU_XCP_OPS_KFD)
                amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

        return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
        int ret = 0;

        if (flags & AMDGPU_XCP_OPS_KFD) {
                amdgpu_amdkfd_device_probe(xcp_mgr->adev);
                amdgpu_amdkfd_device_init(xcp_mgr->adev);
                /* If KFD init failed, return failure */
                if (!xcp_mgr->adev->kfd.init_complete)
                        ret = -EIO;
        }

        return ret;
}

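/*
 * Switch the compute partition mode: validate (or auto-select) the requested
 * mode, tear down KFD if it is active, ask the GFX IP to repartition the
 * XCCs, reinitialize the XCP bookkeeping and finally bring KFD back up.
 */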
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
                                               int mode, int *num_xcps)
{
        int num_xcc_per_xcp, num_xcc, ret;
        struct amdgpu_device *adev;
        u32 flags = 0;

        adev = xcp_mgr->adev;
        num_xcc = NUM_XCC(adev->gfx.xcc_mask);

        if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
                mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
        } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
                dev_err(adev->dev,
                        "Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
                        amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
                return -EINVAL;
        }

        if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
                flags |= AMDGPU_XCP_OPS_KFD;

        if (flags & AMDGPU_XCP_OPS_KFD) {
                ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
                if (ret)
                        goto out;
        }

        ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
        if (ret)
                goto unlock;

        num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
        if (adev->gfx.funcs->switch_partition_mode)
                adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
                                                       num_xcc_per_xcp);

        /* Init info about new xcps */
        *num_xcps = num_xcc / num_xcc_per_xcp;
        amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

        ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
unlock:
        if (flags & AMDGPU_XCP_OPS_KFD)
                amdgpu_amdkfd_unlock_kfd(adev);
out:
        return ret;
}

static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
                                          int xcc_id, uint8_t *mem_id)
{
        /* memory/spatial partition mode validation has already been done */
        *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
        *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

        return 0;
}

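/*
 * Resolve the memory partition backing an XCP: dGPUs derive it from the
 * partition's first XCC, while APP APUs look up the NUMA node reported by
 * ACPI for that XCC.
 */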
static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
                                        struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
        struct amdgpu_numa_info numa_info;
        struct amdgpu_device *adev;
        uint32_t xcc_mask;
        int r, i, xcc_id;

        adev = xcp_mgr->adev;
        /* TODO: BIOS is not returning the right info now
         * Check on this later
         */
        /*
        if (adev->gmc.gmc_funcs->query_mem_partition_mode)
                mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
        */
        if (adev->gmc.num_mem_partitions == 1) {
                /* Only one range */
                *mem_id = 0;
                return 0;
        }

        r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
        if (r || !xcc_mask)
                return -EINVAL;

        xcc_id = ffs(xcc_mask) - 1;
        if (!adev->gmc.is_app_apu)
                return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

        r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

        if (r)
                return r;

        r = -EINVAL;
        for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
                if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
                        *mem_id = i;
                        r = 0;
                        break;
                }
        }

        return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                     enum AMDGPU_XCP_IP_BLOCK ip_id,
                                     struct amdgpu_xcp_ip *ip)
{
        if (!ip)
                return -EINVAL;

        return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
        .switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
        .query_partition_mode = &aqua_vanjaram_query_partition_mode,
        .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
        .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
        .select_scheds = &aqua_vanjaram_select_scheds,
        .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
        int ret;

        ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
                                  &aqua_vanjaram_xcp_funcs);
        if (ret)
                return ret;

        /* TODO: Default memory node affinity init */

        return ret;
}

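/*
 * Derive the SoC topology from the instance masks discovered earlier: the
 * number of SDMA/VCN/JPEG instances, the mask of present AIDs (one bit per
 * fully populated group of SDMA instances), and the XCP manager and IP
 * instance maps.
 */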
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
        u32 mask, inst_mask = adev->sdma.sdma_mask;
        int ret, i;

        /* generally 1 AID supports 4 SDMA instances */
        adev->sdma.num_inst_per_aid = 4;
        adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

        adev->aid_mask = i = 1;
        inst_mask >>= adev->sdma.num_inst_per_aid;

        for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
             inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
                if ((inst_mask & mask) == mask)
                        adev->aid_mask |= (1 << i);
        }

        /* Harvest config is not used for aqua vanjaram. VCN and JPEG will be
         * addressed based on logical instance ids.
         */
        adev->vcn.harvest_config = 0;
        adev->vcn.num_inst_per_aid = 1;
        adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
        adev->jpeg.harvest_config = 0;
        adev->jpeg.num_inst_per_aid = 1;
        adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

        ret = aqua_vanjaram_xcp_mgr_init(adev);
        if (ret)
                return ret;

        aqua_vanjaram_ip_map_init(adev);

        return 0;
}

static void aqua_read_smn(struct amdgpu_device *adev,
                          struct amdgpu_smn_reg_data *regdata,
                          uint64_t smn_addr)
{
        regdata->addr = smn_addr;
        regdata->value = RREG32_PCIE(smn_addr);
}

struct aqua_reg_list {
        uint64_t start_addr;
        uint32_t num_regs;
        uint32_t incrx;
};

#define DW_ADDR_INCR    4

static void aqua_read_smn_ext(struct amdgpu_device *adev,
                              struct amdgpu_smn_reg_data *regdata,
                              uint64_t smn_addr, int i)
{
        regdata->addr =
                smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
        regdata->value = RREG32_PCIE_EXT(regdata->addr);
}

#define smnreg_0x1A340218       0x1A340218
#define smnreg_0x1A3402E4       0x1A3402E4
#define smnreg_0x1A340294       0x1A340294
#define smreg_0x1A380088        0x1A380088

#define NUM_PCIE_SMN_REGS       14

static struct aqua_reg_list pcie_reg_addrs[] = {
        { smnreg_0x1A340218, 1, 0 },
        { smnreg_0x1A3402E4, 1, 0 },
        { smnreg_0x1A340294, 6, DW_ADDR_INCR },
        { smreg_0x1A380088, 6, DW_ADDR_INCR },
};

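/*
 * Snapshot the PCIe register state: dump the SMN registers listed in
 * pcie_reg_addrs plus a few PCIe status registers read from the upstream
 * port above the device into the caller-provided buffer.
 */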
static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
                                             void *buf, size_t max_size)
{
        struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
        uint32_t start_addr, incrx, num_regs, szbuf;
        struct amdgpu_regs_pcie_v1_0 *pcie_regs;
        struct amdgpu_smn_reg_data *reg_data;
        struct pci_dev *us_pdev, *ds_pdev;
        int aer_cap, r, n;

        if (!buf || !max_size)
                return -EINVAL;

        pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;

        szbuf = sizeof(*pcie_reg_state) +
                amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
        /* Only one instance of pcie regs */
        if (max_size < szbuf)
                return -EOVERFLOW;

        pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
                                                     sizeof(*pcie_reg_state));
        pcie_regs->inst_header.instance = 0;
        pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
        pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;

        reg_data = pcie_regs->smn_reg_values;

        for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
                start_addr = pcie_reg_addrs[r].start_addr;
                incrx = pcie_reg_addrs[r].incrx;
                num_regs = pcie_reg_addrs[r].num_regs;
                for (n = 0; n < num_regs; n++) {
                        aqua_read_smn(adev, reg_data, start_addr + n * incrx);
                        ++reg_data;
                }
        }

        ds_pdev = pci_upstream_bridge(adev->pdev);
        us_pdev = pci_upstream_bridge(ds_pdev);

        pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
                                  &pcie_regs->device_status);
        pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
                                  &pcie_regs->link_status);

        aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
        if (aer_cap) {
                pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
                                      &pcie_regs->pcie_corr_err_status);
                pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
                                      &pcie_regs->pcie_uncorr_err_status);
        }

        pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
                              &pcie_regs->sub_bus_number_latency);

        pcie_reg_state->common_header.structure_size = szbuf;
        pcie_reg_state->common_header.format_revision = 1;
        pcie_reg_state->common_header.content_revision = 0;
        pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
        pcie_reg_state->common_header.num_instances = 1;

        return pcie_reg_state->common_header.structure_size;
}

#define smnreg_0x11A00050       0x11A00050
#define smnreg_0x11A00180       0x11A00180
#define smnreg_0x11A00070       0x11A00070
#define smnreg_0x11A00200       0x11A00200
#define smnreg_0x11A0020C       0x11A0020C
#define smnreg_0x11A00210       0x11A00210
#define smnreg_0x11A00108       0x11A00108

#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_XGMI_SMN_REGS 25

static struct aqua_reg_list xgmi_reg_addrs[] = {
        { smnreg_0x11A00050, 1, 0 },
        { smnreg_0x11A00180, 16, DW_ADDR_INCR },
        { smnreg_0x11A00070, 4, DW_ADDR_INCR },
        { smnreg_0x11A00200, 1, 0 },
        { smnreg_0x11A0020C, 1, 0 },
        { smnreg_0x11A00210, 1, 0 },
        { smnreg_0x11A00108, 1, 0 },
};

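/*
 * Snapshot the XGMI link register state: for every present AID, dump the
 * SMN registers of both XGMI link instances via the extended (cross-AID)
 * addressing helper.
 */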
static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
                                             void *buf, size_t max_size)
{
        struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
        uint32_t start_addr, incrx, num_regs, szbuf;
        struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
        struct amdgpu_smn_reg_data *reg_data;
        const int max_xgmi_instances = 8;
        int inst = 0, i, j, r, n;
        const int xgmi_inst = 2;
        void *p;

        if (!buf || !max_size)
                return -EINVAL;

        xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;

        szbuf = sizeof(*xgmi_reg_state) +
                amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
                                    NUM_XGMI_SMN_REGS);
        /* Buffer must hold the maximum number of XGMI instances */
        if (max_size < szbuf)
                return -EOVERFLOW;

        p = &xgmi_reg_state->xgmi_state_regs[0];
        for_each_inst(i, adev->aid_mask) {
                for (j = 0; j < xgmi_inst; ++j) {
                        xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
                        xgmi_regs->inst_header.instance = inst++;

                        xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
                        xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;

                        reg_data = xgmi_regs->smn_reg_values;

                        for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
                                start_addr = xgmi_reg_addrs[r].start_addr;
                                incrx = xgmi_reg_addrs[r].incrx;
                                num_regs = xgmi_reg_addrs[r].num_regs;

                                for (n = 0; n < num_regs; n++) {
                                        aqua_read_smn_ext(
                                                adev, reg_data,
                                                XGMI_LINK_REG(start_addr, j) +
                                                        n * incrx,
                                                i);
                                        ++reg_data;
                                }
                        }
                        p = reg_data;
                }
        }

        xgmi_reg_state->common_header.structure_size = szbuf;
        xgmi_reg_state->common_header.format_revision = 1;
        xgmi_reg_state->common_header.content_revision = 0;
        xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
        xgmi_reg_state->common_header.num_instances = max_xgmi_instances;

        return xgmi_reg_state->common_header.structure_size;
}

#define smnreg_0x11C00070       0x11C00070
#define smnreg_0x11C00210       0x11C00210

static struct aqua_reg_list wafl_reg_addrs[] = {
        { smnreg_0x11C00070, 4, DW_ADDR_INCR },
        { smnreg_0x11C00210, 1, 0 },
};

#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))

#define NUM_WAFL_SMN_REGS 5

static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
                                             void *buf, size_t max_size)
{
        struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
        uint32_t start_addr, incrx, num_regs, szbuf;
        struct amdgpu_regs_wafl_v1_0 *wafl_regs;
        struct amdgpu_smn_reg_data *reg_data;
        const int max_wafl_instances = 8;
        int inst = 0, i, j, r, n;
        const int wafl_inst = 2;
        void *p;

        if (!buf || !max_size)
                return -EINVAL;

        wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;

        szbuf = sizeof(*wafl_reg_state) +
                amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
                                    NUM_WAFL_SMN_REGS);

        if (max_size < szbuf)
                return -EOVERFLOW;

        p = &wafl_reg_state->wafl_state_regs[0];
        for_each_inst(i, adev->aid_mask) {
                for (j = 0; j < wafl_inst; ++j) {
                        wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
                        wafl_regs->inst_header.instance = inst++;

                        wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
                        wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;

                        reg_data = wafl_regs->smn_reg_values;

                        for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
                                start_addr = wafl_reg_addrs[r].start_addr;
                                incrx = wafl_reg_addrs[r].incrx;
                                num_regs = wafl_reg_addrs[r].num_regs;
                                for (n = 0; n < num_regs; n++) {
                                        aqua_read_smn_ext(
                                                adev, reg_data,
                                                WAFL_LINK_REG(start_addr, j) +
                                                        n * incrx,
                                                i);
                                        ++reg_data;
                                }
                        }
                        p = reg_data;
                }
        }

        wafl_reg_state->common_header.structure_size = szbuf;
        wafl_reg_state->common_header.format_revision = 1;
        wafl_reg_state->common_header.content_revision = 0;
        wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
        wafl_reg_state->common_header.num_instances = max_wafl_instances;

        return wafl_reg_state->common_header.structure_size;
}

#define smnreg_0x1B311060 0x1B311060
#define smnreg_0x1B411060 0x1B411060
#define smnreg_0x1B511060 0x1B511060
#define smnreg_0x1B611060 0x1B611060

#define smnreg_0x1C307120 0x1C307120
#define smnreg_0x1C317120 0x1C317120

#define smnreg_0x1C320830 0x1C320830
#define smnreg_0x1C380830 0x1C380830
#define smnreg_0x1C3D0830 0x1C3D0830
#define smnreg_0x1C420830 0x1C420830

#define smnreg_0x1C320100 0x1C320100
#define smnreg_0x1C380100 0x1C380100
#define smnreg_0x1C3D0100 0x1C3D0100
#define smnreg_0x1C420100 0x1C420100

#define smnreg_0x1B310500 0x1B310500
#define smnreg_0x1C300400 0x1C300400

#define USR_CAKE_INCR 0x11000
#define USR_LINK_INCR 0x100000
#define USR_CP_INCR 0x10000

#define NUM_USR_SMN_REGS        20

static struct aqua_reg_list usr_reg_addrs[] = {
        { smnreg_0x1B311060, 4, DW_ADDR_INCR },
        { smnreg_0x1B411060, 4, DW_ADDR_INCR },
        { smnreg_0x1B511060, 4, DW_ADDR_INCR },
        { smnreg_0x1B611060, 4, DW_ADDR_INCR },
        { smnreg_0x1C307120, 2, DW_ADDR_INCR },
        { smnreg_0x1C317120, 2, DW_ADDR_INCR },
};

#define NUM_USR1_SMN_REGS       46
static struct aqua_reg_list usr1_reg_addrs[] = {
        { smnreg_0x1C320830, 6, USR_CAKE_INCR },
        { smnreg_0x1C380830, 5, USR_CAKE_INCR },
        { smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
        { smnreg_0x1C420830, 4, USR_CAKE_INCR },
        { smnreg_0x1C320100, 6, USR_CAKE_INCR },
        { smnreg_0x1C380100, 5, USR_CAKE_INCR },
        { smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
        { smnreg_0x1C420100, 4, USR_CAKE_INCR },
        { smnreg_0x1B310500, 4, USR_LINK_INCR },
        { smnreg_0x1C300400, 2, USR_CP_INCR },
};

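/*
 * Snapshot the USR register state: choose the register list that matches the
 * requested state type (USR or USR_1) and dump it for every present AID
 * using extended SMN addressing.
 */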
static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
                                            void *buf, size_t max_size,
                                            int reg_state)
{
        uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
        struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
        struct amdgpu_regs_usr_v1_0 *usr_regs;
        struct amdgpu_smn_reg_data *reg_data;
        const int max_usr_instances = 4;
        struct aqua_reg_list *reg_addrs;
        int inst = 0, i, n, r, arr_size;
        void *p;

        if (!buf || !max_size)
                return -EINVAL;

        switch (reg_state) {
        case AMDGPU_REG_STATE_TYPE_USR:
                arr_size = ARRAY_SIZE(usr_reg_addrs);
                reg_addrs = usr_reg_addrs;
                num_smn = NUM_USR_SMN_REGS;
                break;
        case AMDGPU_REG_STATE_TYPE_USR_1:
                arr_size = ARRAY_SIZE(usr1_reg_addrs);
                reg_addrs = usr1_reg_addrs;
                num_smn = NUM_USR1_SMN_REGS;
                break;
        default:
                return -EINVAL;
        }

        usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;

        szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
                                                             sizeof(*usr_regs),
                                                             num_smn);
        if (max_size < szbuf)
                return -EOVERFLOW;

        p = &usr_reg_state->usr_state_regs[0];
        for_each_inst(i, adev->aid_mask) {
                usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
                usr_regs->inst_header.instance = inst++;
                usr_regs->inst_header.state = AMDGPU_INST_S_OK;
                usr_regs->inst_header.num_smn_regs = num_smn;
                reg_data = usr_regs->smn_reg_values;

                for (r = 0; r < arr_size; r++) {
                        start_addr = reg_addrs[r].start_addr;
                        incrx = reg_addrs[r].incrx;
                        num_regs = reg_addrs[r].num_regs;
                        for (n = 0; n < num_regs; n++) {
                                aqua_read_smn_ext(adev, reg_data,
                                                  start_addr + n * incrx, i);
                                reg_data++;
                        }
                }
                p = reg_data;
        }

        usr_reg_state->common_header.structure_size = szbuf;
        usr_reg_state->common_header.format_revision = 1;
        usr_reg_state->common_header.content_revision = 0;
        usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
        usr_reg_state->common_header.num_instances = max_usr_instances;

        return usr_reg_state->common_header.structure_size;
}

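/*
 * Entry point for the reg_state interface: dispatch to the per-type dump
 * helper and return the number of bytes written, or a negative error code.
 */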
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
                                    enum amdgpu_reg_state reg_state, void *buf,
                                    size_t max_size)
{
        ssize_t size;

        switch (reg_state) {
        case AMDGPU_REG_STATE_TYPE_PCIE:
                size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
                break;
        case AMDGPU_REG_STATE_TYPE_XGMI:
                size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
                break;
        case AMDGPU_REG_STATE_TYPE_WAFL:
                size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
                break;
        case AMDGPU_REG_STATE_TYPE_USR:
                size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
                                                    AMDGPU_REG_STATE_TYPE_USR);
                break;
        case AMDGPU_REG_STATE_TYPE_USR_1:
                size = aqua_vanjaram_read_usr_state(
                        adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
                break;
        default:
                return -EINVAL;
        }

        return size;
}