/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);

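/* Select the power state/clock mode to transition to for dynamic PM.
 * IGPs and the original R600 step through the (low-to-high) power state
 * array itself; everything else stays on one power state and steps through
 * its clock modes.  dynpm_can_upclock/downclock tell the caller whether a
 * further step in that direction is available.
 */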
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index - 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}

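/* Look up the index of the instance'th power state of the given type,
 * falling back to the default power state when there is no match.
 */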
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

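/* Build the PM profile table for RS780-class IGPs.  Each profile records
 * which power state and clock mode to use with displays off and on; which
 * array slots are used depends on how many power states the BIOS exposes.
 */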
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

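/* Same idea for the discrete r6xx parts.  R600 itself gets the default
 * state for every profile; later families pick battery/performance states
 * by type via r600_pm_get_type_index().
 */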
void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

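/* Apply miscellaneous PM settings for the requested state; on r6xx this
 * only means setting the core voltage when it is software controlled.
 */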
void r600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
		radeon_atom_set_voltage(rdev, voltage->voltage);
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

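/* Flush the HDP cache (so CPU writes reach VRAM) and invalidate the
 * VM context 0 TLB over the GTT range, polling until the invalidation
 * request is answered.
 */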
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

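/* One-time GART init: set up the common GART structure and allocate the
 * page table in VRAM (8 bytes per GPU page).
 */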
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

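/* Program the VM/L2/L1-TLB registers and point VM context 0 at the GART
 * page table, then flush the TLB so the mapping takes effect.
 */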
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

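/* Program the memory controller apertures (system aperture, FB location,
 * AGP top/bottom).  The MC has to be idle and the VGA renderer disabled
 * while this happens, hence the rv515_mc_stop/resume bracket.
 */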
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address in the GPU's
 * address space as it has in the CPU (PCI) address space, as some GPUs
 * seem to have issues when VRAM is reprogrammed to a different address
 * space.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture;
 * the GPU needs them to be contiguous from its point of view so that it
 * can catch accesses outside of them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end, and size should be initialized before calling
 * this function on AGP platforms.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		radeon_gtt_location(rdev, mc);
	}
}

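/* Probe VRAM: derive the memory bus width from RAMCFG/CHMAP, read the
 * sizes from CONFIG_MEMSIZE, and place VRAM & GTT in the GPU address
 * space.
 */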
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	radeon_update_bandwidth_info(rdev);
	return 0;
}

/* We don't check whether the GPU really needs a reset; we simply do the
 * reset.  It's up to the caller to determine if the GPU needs one.  We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}

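/* Lockup detection: if the GUI engine is idle we are not locked up;
 * otherwise nudge the CP with two NOPs and let the generic r100 helper
 * decide whether the read pointer is still making progress.
 */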
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}

int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}

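/* Compute the pipe-to-backend mapping: each tile pipe gets a 2-bit
 * backend number in the returned mask, distributing the enabled backends
 * across the (swizzled) pipes in round-robin fashion.
 */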
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

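/* Count the number of set bits in a pipe/backend mask (popcount). */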
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

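/* Per-family GPU setup: fill rdev->config.r600 with the shader/pipe
 * limits for the detected ASIC, then program tiling, the backend map,
 * CP thresholds and the SQ resource-management defaults derived from
 * them.
 */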
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));

	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if (rdev->family > CHIP_R600)
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256)
		tmp = 256;
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}

/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}

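/* Fetch the PFP, ME and RLC microcode images via request_firmware() (a
 * dummy "radeon_cp" platform device provides the struct device) and
 * validate their sizes against the per-family expectations above.
 */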
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	default:
		BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

1952 static int r600_cp_load_microcode(struct radeon_device *rdev)
1954 const __be32 *fw_data;
1957 if (!rdev->me_fw || !rdev->pfp_fw)
1962 WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1965 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
1966 RREG32(GRBM_SOFT_RESET);
1968 WREG32(GRBM_SOFT_RESET, 0);
1970 WREG32(CP_ME_RAM_WADDR, 0);
1972 fw_data = (const __be32 *)rdev->me_fw->data;
1973 WREG32(CP_ME_RAM_WADDR, 0);
1974 for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
1975 WREG32(CP_ME_RAM_DATA,
1976 be32_to_cpup(fw_data++));
1978 fw_data = (const __be32 *)rdev->pfp_fw->data;
1979 WREG32(CP_PFP_UCODE_ADDR, 0);
1980 for (i = 0; i < PFP_UCODE_SIZE; i++)
1981 WREG32(CP_PFP_UCODE_DATA,
1982 be32_to_cpup(fw_data++));
1984 WREG32(CP_PFP_UCODE_ADDR, 0);
1985 WREG32(CP_ME_RAM_WADDR, 0);
1986 WREG32(CP_ME_RAM_RADDR, 0);
1990 int r600_cp_start(struct radeon_device *rdev)
1995 r = radeon_ring_lock(rdev, 7);
1997 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2000 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2001 radeon_ring_write(rdev, 0x1);
2002 if (rdev->family >= CHIP_CEDAR) {
2003 radeon_ring_write(rdev, 0x0);
2004 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
2005 } else if (rdev->family >= CHIP_RV770) {
2006 radeon_ring_write(rdev, 0x0);
2007 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
2009 radeon_ring_write(rdev, 0x3);
2010 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
2012 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2013 radeon_ring_write(rdev, 0);
2014 radeon_ring_write(rdev, 0);
2015 radeon_ring_unlock_commit(rdev);
2018 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2022 int r600_cp_resume(struct radeon_device *rdev)
2029 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2030 RREG32(GRBM_SOFT_RESET);
2032 WREG32(GRBM_SOFT_RESET, 0);
2034 /* Set ring buffer size */
2035 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
2036 tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2038 tmp |= BUF_SWAP_32BIT;
2040 WREG32(CP_RB_CNTL, tmp);
2041 WREG32(CP_SEM_WAIT_TIMER, 0x4);
2043 /* Set the write pointer delay */
2044 WREG32(CP_RB_WPTR_DELAY, 0);
2046 /* Initialize the ring buffer's read and write pointers */
2047 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2048 WREG32(CP_RB_RPTR_WR, 0);
2049 WREG32(CP_RB_WPTR, 0);
2050 WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
2051 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
2053 WREG32(CP_RB_CNTL, tmp);
2055 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
2056 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2058 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2059 rdev->cp.wptr = RREG32(CP_RB_WPTR);
2061 r600_cp_start(rdev);
2062 rdev->cp.ready = true;
2063 r = radeon_ring_test(rdev);
2065 rdev->cp.ready = false;
2071 void r600_cp_commit(struct radeon_device *rdev)
2073 WREG32(CP_RB_WPTR, rdev->cp.wptr);
2074 (void)RREG32(CP_RB_WPTR);
2077 void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2081 /* Align ring size */
2082 rb_bufsz = drm_order(ring_size / 8);
2083 ring_size = (1 << (rb_bufsz + 1)) * 4;
2084 rdev->cp.ring_size = ring_size;
2085 rdev->cp.align_mask = 16 - 1;
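/* Worked example (a sketch, not upstream code): r600_init() below requests
 * a 1 MB CP ring, so ring_size / 8 = 128 KB = 2^17, drm_order() returns 17
 * and the aligned size is (1 << 18) * 4 bytes = 1 MB; align_mask = 15
 * keeps ring submissions aligned to 16 dwords.
 */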
2088 void r600_cp_fini(struct radeon_device *rdev)
2091 radeon_ring_fini(rdev);
2096 * GPU scratch register helper functions.
2098 void r600_scratch_init(struct radeon_device *rdev)
2102 rdev->scratch.num_reg = 7;
2103 for (i = 0; i < rdev->scratch.num_reg; i++) {
2104 rdev->scratch.free[i] = true;
2105 rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
2109 int r600_ring_test(struct radeon_device *rdev)
2116 r = radeon_scratch_get(rdev, &scratch);
2118 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2121 WREG32(scratch, 0xCAFEDEAD);
2122 r = radeon_ring_lock(rdev, 3);
2124 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2125 radeon_scratch_free(rdev, scratch);
2128 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2129 radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2130 radeon_ring_write(rdev, 0xDEADBEEF);
2131 radeon_ring_unlock_commit(rdev);
2132 for (i = 0; i < rdev->usec_timeout; i++) {
2133 tmp = RREG32(scratch);
2134 if (tmp == 0xDEADBEEF)
2138 if (i < rdev->usec_timeout) {
2139 DRM_INFO("ring test succeeded in %d usecs\n", i);
2141 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2145 radeon_scratch_free(rdev, scratch);
2149 void r600_wb_disable(struct radeon_device *rdev)
2153 WREG32(SCRATCH_UMSK, 0);
2154 if (rdev->wb.wb_obj) {
2155 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
2156 if (unlikely(r != 0))
2158 radeon_bo_kunmap(rdev->wb.wb_obj);
2159 radeon_bo_unpin(rdev->wb.wb_obj);
2160 radeon_bo_unreserve(rdev->wb.wb_obj);
2164 void r600_wb_fini(struct radeon_device *rdev)
2166 r600_wb_disable(rdev);
2167 if (rdev->wb.wb_obj) {
2168 radeon_bo_unref(&rdev->wb.wb_obj);
2170 rdev->wb.wb_obj = NULL;
2174 int r600_wb_enable(struct radeon_device *rdev)
2178 if (rdev->wb.wb_obj == NULL) {
2179 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
2180 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
2182 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
2185 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
2186 if (unlikely(r != 0)) {
2190 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
2191 &rdev->wb.gpu_addr);
2193 radeon_bo_unreserve(rdev->wb.wb_obj);
2194 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
2198 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
2199 radeon_bo_unreserve(rdev->wb.wb_obj);
2201 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
2206 WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
2207 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
2208 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
2209 WREG32(SCRATCH_UMSK, 0xff);
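/* Layout note for the single write-back page set up above: scratch
 * register write-back lands at the start of the page (SCRATCH_ADDR, with
 * SCRATCH_UMSK enabling the individual registers), while the CP read
 * pointer is written back at offset 1024 (CP_RB_RPTR_ADDR/_HI).
 */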
2213 void r600_fence_ring_emit(struct radeon_device *rdev,
2214 struct radeon_fence *fence)
2216 /* Also consider EVENT_WRITE_EOP. It handles the interrupts + timestamps + events */
2218 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2219 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
2220 /* wait for 3D idle clean */
2221 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2222 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2223 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2224 /* Emit fence sequence & fire IRQ */
2225 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2226 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2227 radeon_ring_write(rdev, fence->seq);
2228 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2229 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
2230 radeon_ring_write(rdev, RB_INT_STAT);
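/* A sketch of the EVENT_WRITE_EOP alternative mentioned above, as later
 * radeon code uses it (assumed here, not part of this file): a single
 * packet that flushes caches, writes the fence value to a GPU address
 * "addr" and raises the interrupt, replacing the three steps above.
 *
 *	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 *	radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) |
 *			  EVENT_INDEX(5));
 *	radeon_ring_write(rdev, addr & 0xffffffff);
 *	radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) |
 *			  DATA_SEL(1) | INT_SEL(2));
 *	radeon_ring_write(rdev, fence->seq);
 *	radeon_ring_write(rdev, 0);
 */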
2233 int r600_copy_blit(struct radeon_device *rdev,
2234 uint64_t src_offset, uint64_t dst_offset,
2235 unsigned num_pages, struct radeon_fence *fence)
2239 mutex_lock(&rdev->r600_blit.mutex);
2240 rdev->r600_blit.vb_ib = NULL;
2241 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
2243 if (rdev->r600_blit.vb_ib)
2244 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2245 mutex_unlock(&rdev->r600_blit.mutex);
2248 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
2249 r600_blit_done_copy(rdev, fence);
2250 mutex_unlock(&rdev->r600_blit.mutex);
2254 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2255 uint32_t tiling_flags, uint32_t pitch,
2256 uint32_t offset, uint32_t obj_size)
2258 /* FIXME: implement */
2262 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2264 /* FIXME: implement */
2268 bool r600_card_posted(struct radeon_device *rdev)
2272 /* first check CRTCs */
2273 reg = RREG32(D1CRTC_CONTROL) |
2274 RREG32(D2CRTC_CONTROL);
2278 /* then check MEM_SIZE, in case the crtcs are off */
2279 if (RREG32(CONFIG_MEMSIZE))
2285 int r600_startup(struct radeon_device *rdev)
2289 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2290 r = r600_init_microcode(rdev);
2292 DRM_ERROR("Failed to load firmware!\n");
2297 r600_mc_program(rdev);
2298 if (rdev->flags & RADEON_IS_AGP) {
2299 r600_agp_enable(rdev);
2301 r = r600_pcie_gart_enable(rdev);
2305 r600_gpu_init(rdev);
2306 r = r600_blit_init(rdev);
2308 r600_blit_fini(rdev);
2309 rdev->asic->copy = NULL;
2310 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2312 /* pin copy shader into vram */
2313 if (rdev->r600_blit.shader_obj) {
2314 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2315 if (unlikely(r != 0))
2317 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
2318 &rdev->r600_blit.shader_gpu_addr);
2319 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2321 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
2326 r = r600_irq_init(rdev);
2328 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2329 radeon_irq_kms_fini(rdev);
2334 r = radeon_ring_init(rdev, rdev->cp.ring_size);
2337 r = r600_cp_load_microcode(rdev);
2340 r = r600_cp_resume(rdev);
2343 /* write back buffers are not vital, so don't worry about failure */
2344 r600_wb_enable(rdev);
2348 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2352 temp = RREG32(CONFIG_CNTL);
2353 if (state == false) {
2359 WREG32(CONFIG_CNTL, temp);
2362 int r600_resume(struct radeon_device *rdev)
2366 /* Do not reset the GPU before posting: on r600 hw, unlike r500 hw,
2367  * posting performs the tasks needed to bring the GPU back into good shape.
2371 atom_asic_init(rdev->mode_info.atom_context);
2372 /* Initialize clocks */
2373 r = radeon_clocks_init(rdev);
2378 r = r600_startup(rdev);
2380 DRM_ERROR("r600 startup failed on resume\n");
2384 r = r600_ib_test(rdev);
2386 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
2390 r = r600_audio_init(rdev);
2392 DRM_ERROR("radeon: audio resume failed\n");
2399 int r600_suspend(struct radeon_device *rdev)
2403 r600_audio_fini(rdev);
2404 /* FIXME: we should wait for ring to be empty */
2406 rdev->cp.ready = false;
2407 r600_irq_suspend(rdev);
2408 r600_wb_disable(rdev);
2409 r600_pcie_gart_disable(rdev);
2410 /* unpin shaders bo */
2411 if (rdev->r600_blit.shader_obj) {
2412 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2414 radeon_bo_unpin(rdev->r600_blit.shader_obj);
2415 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2421 /* The plan is to move initialization into this function and use
2422  * helper functions so that radeon_device_init does pretty much
2423  * nothing more than call asic-specific functions. This should
2424  * also allow us to remove a bunch of callback functions
2427 int r600_init(struct radeon_device *rdev)
2431 r = radeon_dummy_page_init(rdev);
2434 if (r600_debugfs_mc_info_init(rdev)) {
2435 DRM_ERROR("Failed to register debugfs file for mc !\n");
2437 /* This doesn't do much */
2438 r = radeon_gem_init(rdev);
2442 if (!radeon_get_bios(rdev)) {
2443 if (ASIC_IS_AVIVO(rdev))
2446 /* Must be an ATOMBIOS */
2447 if (!rdev->is_atom_bios) {
2448 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2451 r = radeon_atombios_init(rdev);
2454 /* Post card if necessary */
2455 if (!r600_card_posted(rdev)) {
2457 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2460 DRM_INFO("GPU not posted. posting now...\n");
2461 atom_asic_init(rdev->mode_info.atom_context);
2463 /* Initialize scratch registers */
2464 r600_scratch_init(rdev);
2465 /* Initialize surface registers */
2466 radeon_surface_init(rdev);
2467 /* Initialize clocks */
2468 radeon_get_clock_info(rdev->ddev);
2469 r = radeon_clocks_init(rdev);
2473 r = radeon_fence_driver_init(rdev);
2476 if (rdev->flags & RADEON_IS_AGP) {
2477 r = radeon_agp_init(rdev);
2479 radeon_agp_disable(rdev);
2481 r = r600_mc_init(rdev);
2484 /* Memory manager */
2485 r = radeon_bo_init(rdev);
2489 r = radeon_irq_kms_init(rdev);
2493 rdev->cp.ring_obj = NULL;
2494 r600_ring_init(rdev, 1024 * 1024);
2496 rdev->ih.ring_obj = NULL;
2497 r600_ih_ring_init(rdev, 64 * 1024);
2499 r = r600_pcie_gart_init(rdev);
2503 rdev->accel_working = true;
2504 r = r600_startup(rdev);
2506 dev_err(rdev->dev, "disabling GPU acceleration\n");
2509 r600_irq_fini(rdev);
2510 radeon_irq_kms_fini(rdev);
2511 r600_pcie_gart_fini(rdev);
2512 rdev->accel_working = false;
2514 if (rdev->accel_working) {
2515 r = radeon_ib_pool_init(rdev);
2517 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2518 rdev->accel_working = false;
2520 r = r600_ib_test(rdev);
2522 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2523 rdev->accel_working = false;
2528 r = r600_audio_init(rdev);
2530 return r; /* TODO error handling */
2534 void r600_fini(struct radeon_device *rdev)
2536 r600_audio_fini(rdev);
2537 r600_blit_fini(rdev);
2540 r600_irq_fini(rdev);
2541 radeon_irq_kms_fini(rdev);
2542 r600_pcie_gart_fini(rdev);
2543 radeon_agp_fini(rdev);
2544 radeon_gem_fini(rdev);
2545 radeon_fence_driver_fini(rdev);
2546 radeon_clocks_fini(rdev);
2547 radeon_bo_fini(rdev);
2548 radeon_atombios_fini(rdev);
2551 radeon_dummy_page_fini(rdev);
2558 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2560 /* FIXME: implement */
2561 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2562 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
2563 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2564 radeon_ring_write(rdev, ib->length_dw);
2567 int r600_ib_test(struct radeon_device *rdev)
2569 struct radeon_ib *ib;
2575 r = radeon_scratch_get(rdev, &scratch);
2577 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2580 WREG32(scratch, 0xCAFEDEAD);
2581 r = radeon_ib_get(rdev, &ib);
2583 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2586 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2587 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2588 ib->ptr[2] = 0xDEADBEEF;
2589 ib->ptr[3] = PACKET2(0);
2590 ib->ptr[4] = PACKET2(0);
2591 ib->ptr[5] = PACKET2(0);
2592 ib->ptr[6] = PACKET2(0);
2593 ib->ptr[7] = PACKET2(0);
2594 ib->ptr[8] = PACKET2(0);
2595 ib->ptr[9] = PACKET2(0);
2596 ib->ptr[10] = PACKET2(0);
2597 ib->ptr[11] = PACKET2(0);
2598 ib->ptr[12] = PACKET2(0);
2599 ib->ptr[13] = PACKET2(0);
2600 ib->ptr[14] = PACKET2(0);
2601 ib->ptr[15] = PACKET2(0);
2603 r = radeon_ib_schedule(rdev, ib);
2605 radeon_scratch_free(rdev, scratch);
2606 radeon_ib_free(rdev, &ib);
2607 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2610 r = radeon_fence_wait(ib->fence, false);
2612 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2615 for (i = 0; i < rdev->usec_timeout; i++) {
2616 tmp = RREG32(scratch);
2617 if (tmp == 0xDEADBEEF)
2621 if (i < rdev->usec_timeout) {
2622 DRM_INFO("ib test succeeded in %u usecs\n", i);
2624 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2628 radeon_scratch_free(rdev, scratch);
2629 radeon_ib_free(rdev, &ib);
2636 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
2637 * much the same as the CP ring buffer, but in reverse. Rather than the CPU
2638 * writing to the ring and the GPU consuming, the GPU writes to the ring
2639 * and the host consumes. As the host irq handler processes interrupts, it
2640 * increments the rptr. When the rptr catches up with the wptr, all the
2641 * current interrupts have been processed.
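/* A minimal sketch of the host's consumer loop described above (rptr and
 * wptr count bytes; each vector is 16 bytes, see r600_irq_process() below):
 *
 *	wptr = r600_get_ih_wptr(rdev);
 *	while (rptr != wptr) {
 *		... decode the vector at rdev->ih.ring[rptr / 4] ...
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *	WREG32(IH_RB_RPTR, rptr);
 */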
2644 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2648 /* Align ring size */
2649 rb_bufsz = drm_order(ring_size / 4);
2650 ring_size = (1 << rb_bufsz) * 4;
2651 rdev->ih.ring_size = ring_size;
2652 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
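/* Worked example: r600_init() below asks for a 64 KB IH ring, so
 * rb_bufsz = drm_order(65536 / 4) = 14, ring_size = (1 << 14) * 4 = 64 KB
 * and ptr_mask = 0xffff, i.e. room for 4096 16-byte interrupt vectors.
 */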
2656 static int r600_ih_ring_alloc(struct radeon_device *rdev)
2660 /* Allocate ring buffer */
2661 if (rdev->ih.ring_obj == NULL) {
2662 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2664 RADEON_GEM_DOMAIN_GTT,
2665 &rdev->ih.ring_obj);
2667 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2670 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2671 if (unlikely(r != 0))
2673 r = radeon_bo_pin(rdev->ih.ring_obj,
2674 RADEON_GEM_DOMAIN_GTT,
2675 &rdev->ih.gpu_addr);
2677 radeon_bo_unreserve(rdev->ih.ring_obj);
2678 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2681 r = radeon_bo_kmap(rdev->ih.ring_obj,
2682 (void **)&rdev->ih.ring);
2683 radeon_bo_unreserve(rdev->ih.ring_obj);
2685 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2692 static void r600_ih_ring_fini(struct radeon_device *rdev)
2695 if (rdev->ih.ring_obj) {
2696 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2697 if (likely(r == 0)) {
2698 radeon_bo_kunmap(rdev->ih.ring_obj);
2699 radeon_bo_unpin(rdev->ih.ring_obj);
2700 radeon_bo_unreserve(rdev->ih.ring_obj);
2702 radeon_bo_unref(&rdev->ih.ring_obj);
2703 rdev->ih.ring = NULL;
2704 rdev->ih.ring_obj = NULL;
2708 void r600_rlc_stop(struct radeon_device *rdev)
2711 if ((rdev->family >= CHIP_RV770) &&
2712 (rdev->family <= CHIP_RV740)) {
2713 /* r7xx asics need to soft reset RLC before halting */
2714 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2715 RREG32(SRBM_SOFT_RESET);
2717 WREG32(SRBM_SOFT_RESET, 0);
2718 RREG32(SRBM_SOFT_RESET);
2721 WREG32(RLC_CNTL, 0);
2724 static void r600_rlc_start(struct radeon_device *rdev)
2726 WREG32(RLC_CNTL, RLC_ENABLE);
2729 static int r600_rlc_init(struct radeon_device *rdev)
2732 const __be32 *fw_data;
2737 r600_rlc_stop(rdev);
2739 WREG32(RLC_HB_BASE, 0);
2740 WREG32(RLC_HB_CNTL, 0);
2741 WREG32(RLC_HB_RPTR, 0);
2742 WREG32(RLC_HB_WPTR, 0);
2743 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2744 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2745 WREG32(RLC_MC_CNTL, 0);
2746 WREG32(RLC_UCODE_CNTL, 0);
2748 fw_data = (const __be32 *)rdev->rlc_fw->data;
2749 if (rdev->family >= CHIP_CEDAR) {
2750 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2751 WREG32(RLC_UCODE_ADDR, i);
2752 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2754 } else if (rdev->family >= CHIP_RV770) {
2755 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2756 WREG32(RLC_UCODE_ADDR, i);
2757 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2760 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2761 WREG32(RLC_UCODE_ADDR, i);
2762 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2765 WREG32(RLC_UCODE_ADDR, 0);
2767 r600_rlc_start(rdev);
2772 static void r600_enable_interrupts(struct radeon_device *rdev)
2774 u32 ih_cntl = RREG32(IH_CNTL);
2775 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2777 ih_cntl |= ENABLE_INTR;
2778 ih_rb_cntl |= IH_RB_ENABLE;
2779 WREG32(IH_CNTL, ih_cntl);
2780 WREG32(IH_RB_CNTL, ih_rb_cntl);
2781 rdev->ih.enabled = true;
2784 void r600_disable_interrupts(struct radeon_device *rdev)
2786 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2787 u32 ih_cntl = RREG32(IH_CNTL);
2789 ih_rb_cntl &= ~IH_RB_ENABLE;
2790 ih_cntl &= ~ENABLE_INTR;
2791 WREG32(IH_RB_CNTL, ih_rb_cntl);
2792 WREG32(IH_CNTL, ih_cntl);
2793 /* set rptr, wptr to 0 */
2794 WREG32(IH_RB_RPTR, 0);
2795 WREG32(IH_RB_WPTR, 0);
2796 rdev->ih.enabled = false;
2801 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2805 WREG32(CP_INT_CNTL, 0);
2806 WREG32(GRBM_INT_CNTL, 0);
2807 WREG32(DxMODE_INT_MASK, 0);
2808 if (ASIC_IS_DCE3(rdev)) {
2809 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2810 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2811 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2812 WREG32(DC_HPD1_INT_CONTROL, tmp);
2813 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2814 WREG32(DC_HPD2_INT_CONTROL, tmp);
2815 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2816 WREG32(DC_HPD3_INT_CONTROL, tmp);
2817 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2818 WREG32(DC_HPD4_INT_CONTROL, tmp);
2819 if (ASIC_IS_DCE32(rdev)) {
2820 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2821 WREG32(DC_HPD5_INT_CONTROL, tmp);
2822 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2823 WREG32(DC_HPD6_INT_CONTROL, tmp);
2826 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2827 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2828 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2829 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2830 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2831 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2832 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2833 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2837 int r600_irq_init(struct radeon_device *rdev)
2841 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2844 ret = r600_ih_ring_alloc(rdev);
2849 r600_disable_interrupts(rdev);
2852 ret = r600_rlc_init(rdev);
2854 r600_ih_ring_fini(rdev);
2858 /* setup interrupt control */
2859 /* set dummy read address to ring address */
2860 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2861 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2862 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2863 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2865 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2866 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2867 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2868 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2870 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2871 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2873 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2874 IH_WPTR_OVERFLOW_CLEAR |
2876 /* WPTR writeback, not yet */
2877 /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2878 WREG32(IH_RB_WPTR_ADDR_LO, 0);
2879 WREG32(IH_RB_WPTR_ADDR_HI, 0);
2881 WREG32(IH_RB_CNTL, ih_rb_cntl);
2883 /* set rptr, wptr to 0 */
2884 WREG32(IH_RB_RPTR, 0);
2885 WREG32(IH_RB_WPTR, 0);
2887 /* Default settings for IH_CNTL (disabled at first) */
2888 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2889 /* RPTR_REARM only works if MSIs are enabled */
2890 if (rdev->msi_enabled)
2891 ih_cntl |= RPTR_REARM;
2894 ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
2896 WREG32(IH_CNTL, ih_cntl);
2898 /* force the active interrupt state to all disabled */
2899 if (rdev->family >= CHIP_CEDAR)
2900 evergreen_disable_interrupt_state(rdev);
2902 r600_disable_interrupt_state(rdev);
2905 r600_enable_interrupts(rdev);
2910 void r600_irq_suspend(struct radeon_device *rdev)
2912 r600_irq_disable(rdev);
2913 r600_rlc_stop(rdev);
2916 void r600_irq_fini(struct radeon_device *rdev)
2918 r600_irq_suspend(rdev);
2919 r600_ih_ring_fini(rdev);
2922 int r600_irq_set(struct radeon_device *rdev)
2924 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2926 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2927 u32 grbm_int_cntl = 0;
2930 if (!rdev->irq.installed) {
2931 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
2934 /* don't enable anything if the ih is disabled */
2935 if (!rdev->ih.enabled) {
2936 r600_disable_interrupts(rdev);
2937 /* force the active interrupt state to all disabled */
2938 r600_disable_interrupt_state(rdev);
2942 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2943 if (ASIC_IS_DCE3(rdev)) {
2944 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2945 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2946 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2947 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2948 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2949 if (ASIC_IS_DCE32(rdev)) {
2950 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2951 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2954 hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2955 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2956 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2957 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2960 if (rdev->irq.sw_int) {
2961 DRM_DEBUG("r600_irq_set: sw int\n");
2962 cp_int_cntl |= RB_INT_ENABLE;
2964 if (rdev->irq.crtc_vblank_int[0]) {
2965 DRM_DEBUG("r600_irq_set: vblank 0\n");
2966 mode_int |= D1MODE_VBLANK_INT_MASK;
2968 if (rdev->irq.crtc_vblank_int[1]) {
2969 DRM_DEBUG("r600_irq_set: vblank 1\n");
2970 mode_int |= D2MODE_VBLANK_INT_MASK;
2972 if (rdev->irq.hpd[0]) {
2973 DRM_DEBUG("r600_irq_set: hpd 1\n");
2974 hpd1 |= DC_HPDx_INT_EN;
2976 if (rdev->irq.hpd[1]) {
2977 DRM_DEBUG("r600_irq_set: hpd 2\n");
2978 hpd2 |= DC_HPDx_INT_EN;
2980 if (rdev->irq.hpd[2]) {
2981 DRM_DEBUG("r600_irq_set: hpd 3\n");
2982 hpd3 |= DC_HPDx_INT_EN;
2984 if (rdev->irq.hpd[3]) {
2985 DRM_DEBUG("r600_irq_set: hpd 4\n");
2986 hpd4 |= DC_HPDx_INT_EN;
2988 if (rdev->irq.hpd[4]) {
2989 DRM_DEBUG("r600_irq_set: hpd 5\n");
2990 hpd5 |= DC_HPDx_INT_EN;
2992 if (rdev->irq.hpd[5]) {
2993 DRM_DEBUG("r600_irq_set: hpd 6\n");
2994 hpd6 |= DC_HPDx_INT_EN;
2996 if (rdev->irq.hdmi[0]) {
2997 DRM_DEBUG("r600_irq_set: hdmi 1\n");
2998 hdmi1 |= R600_HDMI_INT_EN;
3000 if (rdev->irq.hdmi[1]) {
3001 DRM_DEBUG("r600_irq_set: hdmi 2\n");
3002 hdmi2 |= R600_HDMI_INT_EN;
3004 if (rdev->irq.gui_idle) {
3005 DRM_DEBUG("gui idle\n");
3006 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3009 WREG32(CP_INT_CNTL, cp_int_cntl);
3010 WREG32(DxMODE_INT_MASK, mode_int);
3011 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3012 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
3013 if (ASIC_IS_DCE3(rdev)) {
3014 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
3015 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3016 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3017 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3018 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3019 if (ASIC_IS_DCE32(rdev)) {
3020 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3021 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3024 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
3025 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3026 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3027 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3033 static inline void r600_irq_ack(struct radeon_device *rdev,
3036 u32 *disp_int_cont2)
3040 if (ASIC_IS_DCE3(rdev)) {
3041 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3042 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3043 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3045 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
3046 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3047 *disp_int_cont2 = 0;
3050 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
3051 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3052 if (*disp_int & LB_D1_VLINE_INTERRUPT)
3053 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3054 if (*disp_int & LB_D2_VBLANK_INTERRUPT)
3055 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3056 if (*disp_int & LB_D2_VLINE_INTERRUPT)
3057 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3058 if (*disp_int & DC_HPD1_INTERRUPT) {
3059 if (ASIC_IS_DCE3(rdev)) {
3060 tmp = RREG32(DC_HPD1_INT_CONTROL);
3061 tmp |= DC_HPDx_INT_ACK;
3062 WREG32(DC_HPD1_INT_CONTROL, tmp);
3064 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3065 tmp |= DC_HPDx_INT_ACK;
3066 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3069 if (*disp_int & DC_HPD2_INTERRUPT) {
3070 if (ASIC_IS_DCE3(rdev)) {
3071 tmp = RREG32(DC_HPD2_INT_CONTROL);
3072 tmp |= DC_HPDx_INT_ACK;
3073 WREG32(DC_HPD2_INT_CONTROL, tmp);
3075 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3076 tmp |= DC_HPDx_INT_ACK;
3077 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3080 if (*disp_int_cont & DC_HPD3_INTERRUPT) {
3081 if (ASIC_IS_DCE3(rdev)) {
3082 tmp = RREG32(DC_HPD3_INT_CONTROL);
3083 tmp |= DC_HPDx_INT_ACK;
3084 WREG32(DC_HPD3_INT_CONTROL, tmp);
3086 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3087 tmp |= DC_HPDx_INT_ACK;
3088 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3091 if (*disp_int_cont & DC_HPD4_INTERRUPT) {
3092 tmp = RREG32(DC_HPD4_INT_CONTROL);
3093 tmp |= DC_HPDx_INT_ACK;
3094 WREG32(DC_HPD4_INT_CONTROL, tmp);
3096 if (ASIC_IS_DCE32(rdev)) {
3097 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
3098 tmp = RREG32(DC_HPD5_INT_CONTROL);
3099 tmp |= DC_HPDx_INT_ACK;
3100 WREG32(DC_HPD5_INT_CONTROL, tmp);
3102 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
3103 tmp = RREG32(DC_HPD6_INT_CONTROL);
3104 tmp |= DC_HPDx_INT_ACK;
3105 WREG32(DC_HPD6_INT_CONTROL, tmp);
3108 if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3109 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3111 if (ASIC_IS_DCE3(rdev)) {
3112 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3113 WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3116 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3117 WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3122 void r600_irq_disable(struct radeon_device *rdev)
3124 u32 disp_int, disp_int_cont, disp_int_cont2;
3126 r600_disable_interrupts(rdev);
3127 /* Wait and acknowledge irq */
3129 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3130 r600_disable_interrupt_state(rdev);
3133 static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3137 /* XXX use writeback */
3138 wptr = RREG32(IH_RB_WPTR);
3140 if (wptr & RB_OVERFLOW) {
3141 /* When a ring buffer overflow happens, start parsing interrupts
3142 * from the last vector not overwritten (wptr + 16). Hopefully
3143 * this should allow us to catch up.
3145 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3146 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3147 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3148 tmp = RREG32(IH_RB_CNTL);
3149 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3150 WREG32(IH_RB_CNTL, tmp);
3152 return (wptr & rdev->ih.ptr_mask);
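/* Worked example of the overflow path above (a sketch): with the 64 KB
 * ring (ptr_mask = 0xffff), reading wptr as RB_OVERFLOW | 0x0010 means the
 * GPU has lapped the host; rptr is forced to (0x0010 + 16) & 0xffff =
 * 0x0020, skipping the just-overwritten vector and resuming at the oldest
 * intact one.
 */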
3156 * Each IV ring entry is 128 bits:
3157 * [7:0] - interrupt source id
3159 * [59:32] - interrupt source data
3160 * [127:60] - reserved
3162 * The basic interrupt vector entries
3163 * are decoded as follows:
3164 * src_id src_data description
3169 * 19 0 FP Hot plug detection A
3170 * 19 1 FP Hot plug detection B
3171 * 19 2 DAC A auto-detection
3172 * 19 3 DAC B auto-detection
3178 * 181 - EOP Interrupt
3181 * Note, these are based on r600 and may need to be
3182 * adjusted or added to on newer asics
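/* How one 128-bit vector maps onto the u32 ring, matching the decode in
 * r600_irq_process() below (rptr counts bytes):
 *
 *	ring_index = rptr / 4;
 *	src_id   = ring[ring_index] & 0xff;           bits [7:0]
 *	src_data = ring[ring_index + 1] & 0xfffffff;  bits [59:32]
 *	rptr = (rptr + 16) & ptr_mask;                advance one vector
 */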
3185 int r600_irq_process(struct radeon_device *rdev)
3187 u32 wptr = r600_get_ih_wptr(rdev);
3188 u32 rptr = rdev->ih.rptr;
3189 u32 src_id, src_data;
3190 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
3191 unsigned long flags;
3192 bool queue_hotplug = false;
3194 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3195 if (!rdev->ih.enabled)
3198 spin_lock_irqsave(&rdev->ih.lock, flags);
3201 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3204 if (rdev->shutdown) {
3205 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3210 /* display interrupts */
3211 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3213 rdev->ih.wptr = wptr;
3214 while (rptr != wptr) {
3215 /* wptr/rptr are in bytes! */
3216 ring_index = rptr / 4;
3217 src_id = rdev->ih.ring[ring_index] & 0xff;
3218 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
3221 case 1: /* D1 vblank/vline */
3223 case 0: /* D1 vblank */
3224 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
3225 drm_handle_vblank(rdev->ddev, 0);
3226 rdev->pm.vblank_sync = true;
3227 wake_up(&rdev->irq.vblank_queue);
3228 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3229 DRM_DEBUG("IH: D1 vblank\n");
3232 case 1: /* D1 vline */
3233 if (disp_int & LB_D1_VLINE_INTERRUPT) {
3234 disp_int &= ~LB_D1_VLINE_INTERRUPT;
3235 DRM_DEBUG("IH: D1 vline\n");
3239 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3243 case 5: /* D2 vblank/vline */
3245 case 0: /* D2 vblank */
3246 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
3247 drm_handle_vblank(rdev->ddev, 1);
3248 rdev->pm.vblank_sync = true;
3249 wake_up(&rdev->irq.vblank_queue);
3250 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3251 DRM_DEBUG("IH: D2 vblank\n");
3254 case 1: /* D2 vline */
3255 if (disp_int & LB_D2_VLINE_INTERRUPT) {
3256 disp_int &= ~LB_D2_VLINE_INTERRUPT;
3257 DRM_DEBUG("IH: D2 vline\n");
3261 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3265 case 19: /* HPD/DAC hotplug */
3268 if (disp_int & DC_HPD1_INTERRUPT) {
3269 disp_int &= ~DC_HPD1_INTERRUPT;
3270 queue_hotplug = true;
3271 DRM_DEBUG("IH: HPD1\n");
3275 if (disp_int & DC_HPD2_INTERRUPT) {
3276 disp_int &= ~DC_HPD2_INTERRUPT;
3277 queue_hotplug = true;
3278 DRM_DEBUG("IH: HPD2\n");
3282 if (disp_int_cont & DC_HPD3_INTERRUPT) {
3283 disp_int_cont &= ~DC_HPD3_INTERRUPT;
3284 queue_hotplug = true;
3285 DRM_DEBUG("IH: HPD3\n");
3289 if (disp_int_cont & DC_HPD4_INTERRUPT) {
3290 disp_int_cont &= ~DC_HPD4_INTERRUPT;
3291 queue_hotplug = true;
3292 DRM_DEBUG("IH: HPD4\n");
3296 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
3297 disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3298 queue_hotplug = true;
3299 DRM_DEBUG("IH: HPD5\n");
3303 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
3304 disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3305 queue_hotplug = true;
3306 DRM_DEBUG("IH: HPD6\n");
3310 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3315 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3316 r600_audio_schedule_polling(rdev);
3318 case 176: /* CP_INT in ring buffer */
3319 case 177: /* CP_INT in IB1 */
3320 case 178: /* CP_INT in IB2 */
3321 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3322 radeon_fence_process(rdev);
3324 case 181: /* CP EOP event */
3325 DRM_DEBUG("IH: CP EOP\n");
3327 case 233: /* GUI IDLE */
3328 DRM_DEBUG("IH: CP EOP\n");
3329 rdev->pm.gui_idle = true;
3330 wake_up(&rdev->irq.idle_queue);
3333 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3337 /* wptr/rptr are in bytes! */
3339 rptr &= rdev->ih.ptr_mask;
3341 /* make sure wptr hasn't changed while processing */
3342 wptr = r600_get_ih_wptr(rdev);
3343 if (wptr != rdev->ih.wptr)
3346 queue_work(rdev->wq, &rdev->hotplug_work);
3347 rdev->ih.rptr = rptr;
3348 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3349 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3356 #if defined(CONFIG_DEBUG_FS)
3358 static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3360 struct drm_info_node *node = (struct drm_info_node *) m->private;
3361 struct drm_device *dev = node->minor->dev;
3362 struct radeon_device *rdev = dev->dev_private;
3363 unsigned count, i, j;
3365 radeon_ring_free_size(rdev);
3366 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
3367 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
3368 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3369 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3370 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3371 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
3372 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3373 seq_printf(m, "%u dwords in ring\n", count);
3375 for (j = 0; j <= count; j++) {
3376 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
3377 i = (i + 1) & rdev->cp.ptr_mask;
3382 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3384 struct drm_info_node *node = (struct drm_info_node *) m->private;
3385 struct drm_device *dev = node->minor->dev;
3386 struct radeon_device *rdev = dev->dev_private;
3388 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3389 DREG32_SYS(m, rdev, VM_L2_STATUS);
3393 static struct drm_info_list r600_mc_info_list[] = {
3394 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3395 {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3399 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3401 #if defined(CONFIG_DEBUG_FS)
3402 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3409 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3410 * rdev: radeon device structure
3411 * bo: buffer object struct which userspace is waiting on to go idle
3413 * Some R6XX/R7XX chips don't seem to take into account the HDP flush
3414 * performed through the ring buffer; this leads to rendering corruption,
3415 * see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
3416 * perform the HDP flush directly by writing the register through MMIO.
3418 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3420 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
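/* The single register write above is the MMIO equivalent of the HDP flush
 * normally requested through the ring; writing
 * HDP_MEM_COHERENCY_FLUSH_CNTL directly sidesteps the ring, which is the
 * point of the workaround described in the comment above.
 */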