/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Jonathan Kim <jonathan.kim@amd.com>
 *
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include "amdgpu.h"
#include "amdgpu_pmu.h"
#include "df_v3_6.h"

#define PMU_NAME_SIZE 32

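/*
 * Each PMU registered here shows up under /sys/bus/event_source/devices/
 * (for example "amdgpu_df_0") and can be driven with the perf tool.  A rough
 * sketch, with placeholder event encodings:
 *
 *   perf stat -a -e amdgpu_df_0/event=0xNN,instance=0xNN,umask=0xNN/ sleep 1
 *
 * The exact format fields are defined by the attribute groups passed in at
 * registration time (see df_v3_6_attr_groups); the line above is only an
 * illustration of the shape.
 */
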
/* record to keep track of pmu entry per pmu type per device */
struct amdgpu_pmu_entry {
        struct list_head entry;
        struct amdgpu_device *adev;
        struct pmu pmu;
        unsigned int pmu_perf_type;
};

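/*
 * Global list of registered PMU entries across all devices; amdgpu_pmu_fini()
 * walks it to tear down the entries that belong to a departing device.
 */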
static LIST_HEAD(amdgpu_pmu_list);

/* initialize perf counter */
static int amdgpu_perf_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* test the event attr type for PMU enumeration */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* update the hw_perf_event struct with config data */
        hwc->conf = event->attr.config;

        return 0;
}

/* start perf counter */
static void amdgpu_perf_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);

        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;

        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
        hwc->state = 0;

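        /*
         * For a fresh (non-reload) start the DF counter is re-enabled first;
         * the last pmc_start() argument selects the enable step versus the
         * start step.
         */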
        switch (pe->pmu_perf_type) {
        case PERF_TYPE_AMDGPU_DF:
                if (!(flags & PERF_EF_RELOAD))
                        pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);

                pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);
                break;
        default:
                break;
        }

        perf_event_update_userpage(event);
}

/* read perf counter */
static void amdgpu_perf_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);
        u64 count, prev;

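        /*
         * Re-read the hardware count until prev_count can be swapped in
         * atomically, then fold the delta into the event count; the cmpxchg
         * loop keeps concurrent readers from double-counting.
         */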
        do {
                prev = local64_read(&hwc->prev_count);

                switch (pe->pmu_perf_type) {
                case PERF_TYPE_AMDGPU_DF:
                        pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf,
                                                          &count);
                        break;
                default:
                        count = 0;
                        break;
                }
        } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);

        local64_add(count - prev, &event->count);
}

/* stop perf counter */
static void amdgpu_perf_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        switch (pe->pmu_perf_type) {
        case PERF_TYPE_AMDGPU_DF:
                pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
                break;
        default:
                break;
        }

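        /* mark the counter stopped and fold in the final hardware count */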
        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
        hwc->state |= PERF_HES_STOPPED;

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        amdgpu_perf_read(event);
        hwc->state |= PERF_HES_UPTODATE;
}

/* add perf counter */
static int amdgpu_perf_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);
        int retval;

        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

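        /* enable the backing hardware counter; bail out if that fails */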
        switch (pe->pmu_perf_type) {
        case PERF_TYPE_AMDGPU_DF:
                retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
                break;
        default:
                return 0;
        }

        if (retval)
                return retval;

        if (flags & PERF_EF_START)
                amdgpu_perf_start(event, PERF_EF_RELOAD);

        return retval;
}

/* delete perf counter */
static void amdgpu_perf_del(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);

        amdgpu_perf_stop(event, PERF_EF_UPDATE);

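        /* fully disable the counter; the stop above already folded in the final count */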
        switch (pe->pmu_perf_type) {
        case PERF_TYPE_AMDGPU_DF:
                pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1);
                break;
        default:
                break;
        }

        perf_event_update_userpage(event);
}

/* vega20 pmus */

/* init pmu tracking per pmu type */
static int init_pmu_by_type(struct amdgpu_device *adev,
                  const struct attribute_group *attr_groups[],
                  char *pmu_type_name, char *pmu_file_prefix,
                  unsigned int pmu_perf_type,
                  unsigned int num_counters)
{
        char pmu_name[PMU_NAME_SIZE];
        struct amdgpu_pmu_entry *pmu_entry;
        int ret = 0;

        pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);
        if (!pmu_entry)
                return -ENOMEM;

        pmu_entry->adev = adev;
        pmu_entry->pmu = (struct pmu){
                .event_init = amdgpu_perf_event_init,
                .add = amdgpu_perf_add,
                .del = amdgpu_perf_del,
                .start = amdgpu_perf_start,
                .stop = amdgpu_perf_stop,
                .read = amdgpu_perf_read,
                .task_ctx_nr = perf_invalid_context,
        };

        pmu_entry->pmu.attr_groups = attr_groups;
        pmu_entry->pmu_perf_type = pmu_perf_type;

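        /* name the PMU after the DRM primary node, e.g. "amdgpu_df_0" */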
        snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
                                pmu_file_prefix, adev->ddev->primary->index);

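        /* a type of -1 asks the perf core to allocate a dynamic PMU type id */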
        ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
        if (ret) {
                kfree(pmu_entry);
                pr_warn("Error initializing AMDGPU %s PMUs.\n", pmu_type_name);
                return ret;
        }

        pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
                        pmu_type_name, num_counters);

        list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list);

        return 0;
}

/* init amdgpu_pmu */
int amdgpu_pmu_init(struct amdgpu_device *adev)
{
        int ret = 0;

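        /* PMU support is currently limited to Vega20's data fabric counters */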
        switch (adev->asic_type) {
        case CHIP_VEGA20:
                /* init df */
                ret = init_pmu_by_type(adev, df_v3_6_attr_groups,
                                       "DF", "amdgpu_df", PERF_TYPE_AMDGPU_DF,
                                       DF_V3_6_MAX_COUNTERS);

                /* other pmu types go here */
                break;
        default:
                return 0;
        }

        return ret;
}

/* destroy all pmu data associated with target device */
void amdgpu_pmu_fini(struct amdgpu_device *adev)
{
        struct amdgpu_pmu_entry *pe, *temp;

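        /* unregister and free only the entries owned by this device */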
        list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) {
                if (pe->adev == adev) {
                        list_del(&pe->entry);
                        perf_pmu_unregister(&pe->pmu);
                        kfree(pe);
                }
        }
}