/* Merge tag 'v2.6.35-rc6' into drm-radeon-next */
/* [sfrench/cifs-2.6.git] / drivers/gpu/drm/radeon/r600.c */
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/slab.h>
29 #include <linux/seq_file.h>
30 #include <linux/firmware.h>
31 #include <linux/platform_device.h>
32 #include "drmP.h"
33 #include "radeon_drm.h"
34 #include "radeon.h"
35 #include "radeon_asic.h"
36 #include "radeon_mode.h"
37 #include "r600d.h"
38 #include "atom.h"
39 #include "avivod.h"
40
41 #define PFP_UCODE_SIZE 576
42 #define PM4_UCODE_SIZE 1792
43 #define RLC_UCODE_SIZE 768
44 #define R700_PFP_UCODE_SIZE 848
45 #define R700_PM4_UCODE_SIZE 1360
46 #define R700_RLC_UCODE_SIZE 1024
47 #define EVERGREEN_PFP_UCODE_SIZE 1120
48 #define EVERGREEN_PM4_UCODE_SIZE 1376
49 #define EVERGREEN_RLC_UCODE_SIZE 768
50
51 /* Firmware Names */
52 MODULE_FIRMWARE("radeon/R600_pfp.bin");
53 MODULE_FIRMWARE("radeon/R600_me.bin");
54 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
55 MODULE_FIRMWARE("radeon/RV610_me.bin");
56 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
57 MODULE_FIRMWARE("radeon/RV630_me.bin");
58 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
59 MODULE_FIRMWARE("radeon/RV620_me.bin");
60 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
61 MODULE_FIRMWARE("radeon/RV635_me.bin");
62 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
63 MODULE_FIRMWARE("radeon/RV670_me.bin");
64 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
65 MODULE_FIRMWARE("radeon/RS780_me.bin");
66 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
67 MODULE_FIRMWARE("radeon/RV770_me.bin");
68 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
69 MODULE_FIRMWARE("radeon/RV730_me.bin");
70 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
71 MODULE_FIRMWARE("radeon/RV710_me.bin");
72 MODULE_FIRMWARE("radeon/R600_rlc.bin");
73 MODULE_FIRMWARE("radeon/R700_rlc.bin");
74 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
75 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
76 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
77 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
78 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
79 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
80 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
81 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
82 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
83 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
84 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
85 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
86
87 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
88
89 /* r600,rv610,rv630,rv620,rv635,rv670 */
90 int r600_mc_wait_for_idle(struct radeon_device *rdev);
91 void r600_gpu_init(struct radeon_device *rdev);
92 void r600_fini(struct radeon_device *rdev);
93 void r600_irq_disable(struct radeon_device *rdev);
94
95 /* get temperature in millidegrees */
96 u32 rv6xx_get_temp(struct radeon_device *rdev)
97 {
98         u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
99                 ASIC_T_SHIFT;
100         u32 actual_temp = 0;
101
102         if ((temp >> 7) & 1)
103                 actual_temp = 0;
104         else
105                 actual_temp = (temp >> 1) & 0xff;
106
107         return actual_temp * 1000;
108 }
109
/*
 * Select the power state / clock mode to request for the planned dynpm
 * action (minimum, downclock, upclock, default).
 *
 * Writes rdev->pm.requested_power_state_index and
 * rdev->pm.requested_clock_mode_index, and clears
 * dynpm_can_upclock/dynpm_can_downclock when the respective limit has
 * been reached.  Two strategies are used:
 *  - IGP and R600 chips: walk the power state array itself
 *    (states are ordered low to high).
 *  - other chips: pick a power state once, then switch between the
 *    clock modes inside that state.
 */
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	/* assume both directions are possible until proven otherwise */
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		/* skip state 0 as the floor when there are more than two
		 * states; presumably state 0 is reserved for the default —
		 * TODO confirm against the power state table layout */
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			/* go straight to the lowest usable state */
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				/* already at the floor; stay put */
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					/* multi-head: pick the first state below the
					 * current one that supports multiple displays */
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							/* nothing lower qualified; keep current */
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					/* single head: simply step down one state,
					 * wrapping from 0 to the top of the array */
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				/* already at the top; stay put */
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					/* multi-head: scan from the top down for the
					 * highest multi-display-capable state above
					 * the current one */
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							/* nothing higher qualified; keep current */
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					/* single head: step up one state */
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		/* with the power state fixed, pick a clock mode inside it */
		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				/* same state: step the clock mode down, clamping at 0 */
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				/* state changed: start at the lowest clock mode */
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				/* same state: step the clock mode up, clamping at the top */
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				/* state changed: jump to the highest clock mode */
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
282
283 static int r600_pm_get_type_index(struct radeon_device *rdev,
284                                   enum radeon_pm_state_type ps_type,
285                                   int instance)
286 {
287         int i;
288         int found_instance = -1;
289
290         for (i = 0; i < rdev->pm.num_power_states; i++) {
291                 if (rdev->pm.power_state[i].type == ps_type) {
292                         found_instance++;
293                         if (found_instance == instance)
294                                 return i;
295                 }
296         }
297         /* return default if no match */
298         return rdev->pm.default_power_state_index;
299 }
300
301 void rs780_pm_init_profile(struct radeon_device *rdev)
302 {
303         if (rdev->pm.num_power_states == 2) {
304                 /* default */
305                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
306                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
307                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
308                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
309                 /* low sh */
310                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
311                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
312                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
313                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
314                 /* mid sh */
315                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
316                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
317                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
318                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
319                 /* high sh */
320                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
321                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
322                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
323                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
324                 /* low mh */
325                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
326                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
327                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
328                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
329                 /* mid mh */
330                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
331                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
332                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
333                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
334                 /* high mh */
335                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
336                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
337                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
338                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
339         } else if (rdev->pm.num_power_states == 3) {
340                 /* default */
341                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
342                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
343                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
344                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
345                 /* low sh */
346                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
347                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
348                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
349                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
350                 /* mid sh */
351                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
352                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
353                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
354                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
355                 /* high sh */
356                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
357                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
358                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
359                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
360                 /* low mh */
361                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
362                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
363                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
364                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
365                 /* mid mh */
366                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
367                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
368                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
369                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
370                 /* high mh */
371                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
372                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
373                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
374                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
375         } else {
376                 /* default */
377                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
378                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
379                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
380                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
381                 /* low sh */
382                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
383                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
384                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
385                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
386                 /* mid sh */
387                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
388                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
389                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
390                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
391                 /* high sh */
392                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
393                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
394                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
395                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
396                 /* low mh */
397                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
398                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
399                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
400                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
401                 /* mid mh */
402                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
403                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
404                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
405                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
406                 /* high mh */
407                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
408                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
409                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
410                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
411         }
412 }
413
414 void r600_pm_init_profile(struct radeon_device *rdev)
415 {
416         if (rdev->family == CHIP_R600) {
417                 /* XXX */
418                 /* default */
419                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
420                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
421                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
422                 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
423                 /* low sh */
424                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
425                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
426                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
427                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
428                 /* mid sh */
429                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
430                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
431                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
432                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
433                 /* high sh */
434                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
435                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
436                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
437                 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
438                 /* low mh */
439                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
440                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
441                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
442                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
443                 /* mid mh */
444                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
445                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
446                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
447                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
448                 /* high mh */
449                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
450                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
451                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
452                 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
453         } else {
454                 if (rdev->pm.num_power_states < 4) {
455                         /* default */
456                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
457                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
458                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
459                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
460                         /* low sh */
461                         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
462                         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
463                         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
464                         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
465                         /* mid sh */
466                         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
467                         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
468                         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
469                         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
470                         /* high sh */
471                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
472                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
473                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
474                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
475                         /* low mh */
476                         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
477                         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
478                         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
479                         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
480                         /* low mh */
481                         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
482                         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
483                         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
484                         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
485                         /* high mh */
486                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
487                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
488                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
489                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
490                 } else {
491                         /* default */
492                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
493                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
494                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
495                         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
496                         /* low sh */
497                         if (rdev->flags & RADEON_IS_MOBILITY) {
498                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
499                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
500                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
501                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
502                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
503                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
504                         } else {
505                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
506                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
507                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
508                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
509                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
510                                 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
511                         }
512                         /* mid sh */
513                         if (rdev->flags & RADEON_IS_MOBILITY) {
514                                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
515                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
516                                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
517                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
518                                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
519                                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
520                         } else {
521                                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
522                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
523                                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
524                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
525                                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
526                                 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
527                         }
528                         /* high sh */
529                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
530                                 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
531                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
532                                 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
533                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
534                         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
535                         /* low mh */
536                         if (rdev->flags & RADEON_IS_MOBILITY) {
537                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
538                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
539                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
540                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
541                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
542                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
543                         } else {
544                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
545                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
546                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
547                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
548                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
549                                 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
550                         }
551                         /* mid mh */
552                         if (rdev->flags & RADEON_IS_MOBILITY) {
553                                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
554                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
555                                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
556                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
557                                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
558                                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
559                         } else {
560                                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
561                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
562                                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
563                                         r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
564                                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
565                                 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
566                         }
567                         /* high mh */
568                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
569                                 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
570                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
571                                 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
572                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
573                         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
574                 }
575         }
576 }
577
578 void r600_pm_misc(struct radeon_device *rdev)
579 {
580         int req_ps_idx = rdev->pm.requested_power_state_index;
581         int req_cm_idx = rdev->pm.requested_clock_mode_index;
582         struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
583         struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
584
585         if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
586                 if (voltage->voltage != rdev->pm.current_vddc) {
587                         radeon_atom_set_voltage(rdev, voltage->voltage);
588                         rdev->pm.current_vddc = voltage->voltage;
589                         DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
590                 }
591         }
592 }
593
594 bool r600_gui_idle(struct radeon_device *rdev)
595 {
596         if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
597                 return false;
598         else
599                 return true;
600 }
601
602 /* hpd for digital panel detect/disconnect */
603 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
604 {
605         bool connected = false;
606
607         if (ASIC_IS_DCE3(rdev)) {
608                 switch (hpd) {
609                 case RADEON_HPD_1:
610                         if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
611                                 connected = true;
612                         break;
613                 case RADEON_HPD_2:
614                         if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
615                                 connected = true;
616                         break;
617                 case RADEON_HPD_3:
618                         if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
619                                 connected = true;
620                         break;
621                 case RADEON_HPD_4:
622                         if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
623                                 connected = true;
624                         break;
625                         /* DCE 3.2 */
626                 case RADEON_HPD_5:
627                         if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
628                                 connected = true;
629                         break;
630                 case RADEON_HPD_6:
631                         if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
632                                 connected = true;
633                         break;
634                 default:
635                         break;
636                 }
637         } else {
638                 switch (hpd) {
639                 case RADEON_HPD_1:
640                         if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
641                                 connected = true;
642                         break;
643                 case RADEON_HPD_2:
644                         if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
645                                 connected = true;
646                         break;
647                 case RADEON_HPD_3:
648                         if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
649                                 connected = true;
650                         break;
651                 default:
652                         break;
653                 }
654         }
655         return connected;
656 }
657
658 void r600_hpd_set_polarity(struct radeon_device *rdev,
659                            enum radeon_hpd_id hpd)
660 {
661         u32 tmp;
662         bool connected = r600_hpd_sense(rdev, hpd);
663
664         if (ASIC_IS_DCE3(rdev)) {
665                 switch (hpd) {
666                 case RADEON_HPD_1:
667                         tmp = RREG32(DC_HPD1_INT_CONTROL);
668                         if (connected)
669                                 tmp &= ~DC_HPDx_INT_POLARITY;
670                         else
671                                 tmp |= DC_HPDx_INT_POLARITY;
672                         WREG32(DC_HPD1_INT_CONTROL, tmp);
673                         break;
674                 case RADEON_HPD_2:
675                         tmp = RREG32(DC_HPD2_INT_CONTROL);
676                         if (connected)
677                                 tmp &= ~DC_HPDx_INT_POLARITY;
678                         else
679                                 tmp |= DC_HPDx_INT_POLARITY;
680                         WREG32(DC_HPD2_INT_CONTROL, tmp);
681                         break;
682                 case RADEON_HPD_3:
683                         tmp = RREG32(DC_HPD3_INT_CONTROL);
684                         if (connected)
685                                 tmp &= ~DC_HPDx_INT_POLARITY;
686                         else
687                                 tmp |= DC_HPDx_INT_POLARITY;
688                         WREG32(DC_HPD3_INT_CONTROL, tmp);
689                         break;
690                 case RADEON_HPD_4:
691                         tmp = RREG32(DC_HPD4_INT_CONTROL);
692                         if (connected)
693                                 tmp &= ~DC_HPDx_INT_POLARITY;
694                         else
695                                 tmp |= DC_HPDx_INT_POLARITY;
696                         WREG32(DC_HPD4_INT_CONTROL, tmp);
697                         break;
698                 case RADEON_HPD_5:
699                         tmp = RREG32(DC_HPD5_INT_CONTROL);
700                         if (connected)
701                                 tmp &= ~DC_HPDx_INT_POLARITY;
702                         else
703                                 tmp |= DC_HPDx_INT_POLARITY;
704                         WREG32(DC_HPD5_INT_CONTROL, tmp);
705                         break;
706                         /* DCE 3.2 */
707                 case RADEON_HPD_6:
708                         tmp = RREG32(DC_HPD6_INT_CONTROL);
709                         if (connected)
710                                 tmp &= ~DC_HPDx_INT_POLARITY;
711                         else
712                                 tmp |= DC_HPDx_INT_POLARITY;
713                         WREG32(DC_HPD6_INT_CONTROL, tmp);
714                         break;
715                 default:
716                         break;
717                 }
718         } else {
719                 switch (hpd) {
720                 case RADEON_HPD_1:
721                         tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
722                         if (connected)
723                                 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
724                         else
725                                 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
726                         WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
727                         break;
728                 case RADEON_HPD_2:
729                         tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
730                         if (connected)
731                                 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
732                         else
733                                 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
734                         WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
735                         break;
736                 case RADEON_HPD_3:
737                         tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
738                         if (connected)
739                                 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
740                         else
741                                 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
742                         WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
743                         break;
744                 default:
745                         break;
746                 }
747         }
748 }
749
750 void r600_hpd_init(struct radeon_device *rdev)
751 {
752         struct drm_device *dev = rdev->ddev;
753         struct drm_connector *connector;
754
755         if (ASIC_IS_DCE3(rdev)) {
756                 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
757                 if (ASIC_IS_DCE32(rdev))
758                         tmp |= DC_HPDx_EN;
759
760                 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
761                         struct radeon_connector *radeon_connector = to_radeon_connector(connector);
762                         switch (radeon_connector->hpd.hpd) {
763                         case RADEON_HPD_1:
764                                 WREG32(DC_HPD1_CONTROL, tmp);
765                                 rdev->irq.hpd[0] = true;
766                                 break;
767                         case RADEON_HPD_2:
768                                 WREG32(DC_HPD2_CONTROL, tmp);
769                                 rdev->irq.hpd[1] = true;
770                                 break;
771                         case RADEON_HPD_3:
772                                 WREG32(DC_HPD3_CONTROL, tmp);
773                                 rdev->irq.hpd[2] = true;
774                                 break;
775                         case RADEON_HPD_4:
776                                 WREG32(DC_HPD4_CONTROL, tmp);
777                                 rdev->irq.hpd[3] = true;
778                                 break;
779                                 /* DCE 3.2 */
780                         case RADEON_HPD_5:
781                                 WREG32(DC_HPD5_CONTROL, tmp);
782                                 rdev->irq.hpd[4] = true;
783                                 break;
784                         case RADEON_HPD_6:
785                                 WREG32(DC_HPD6_CONTROL, tmp);
786                                 rdev->irq.hpd[5] = true;
787                                 break;
788                         default:
789                                 break;
790                         }
791                 }
792         } else {
793                 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
794                         struct radeon_connector *radeon_connector = to_radeon_connector(connector);
795                         switch (radeon_connector->hpd.hpd) {
796                         case RADEON_HPD_1:
797                                 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
798                                 rdev->irq.hpd[0] = true;
799                                 break;
800                         case RADEON_HPD_2:
801                                 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
802                                 rdev->irq.hpd[1] = true;
803                                 break;
804                         case RADEON_HPD_3:
805                                 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
806                                 rdev->irq.hpd[2] = true;
807                                 break;
808                         default:
809                                 break;
810                         }
811                 }
812         }
813         if (rdev->irq.installed)
814                 r600_irq_set(rdev);
815 }
816
817 void r600_hpd_fini(struct radeon_device *rdev)
818 {
819         struct drm_device *dev = rdev->ddev;
820         struct drm_connector *connector;
821
822         if (ASIC_IS_DCE3(rdev)) {
823                 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
824                         struct radeon_connector *radeon_connector = to_radeon_connector(connector);
825                         switch (radeon_connector->hpd.hpd) {
826                         case RADEON_HPD_1:
827                                 WREG32(DC_HPD1_CONTROL, 0);
828                                 rdev->irq.hpd[0] = false;
829                                 break;
830                         case RADEON_HPD_2:
831                                 WREG32(DC_HPD2_CONTROL, 0);
832                                 rdev->irq.hpd[1] = false;
833                                 break;
834                         case RADEON_HPD_3:
835                                 WREG32(DC_HPD3_CONTROL, 0);
836                                 rdev->irq.hpd[2] = false;
837                                 break;
838                         case RADEON_HPD_4:
839                                 WREG32(DC_HPD4_CONTROL, 0);
840                                 rdev->irq.hpd[3] = false;
841                                 break;
842                                 /* DCE 3.2 */
843                         case RADEON_HPD_5:
844                                 WREG32(DC_HPD5_CONTROL, 0);
845                                 rdev->irq.hpd[4] = false;
846                                 break;
847                         case RADEON_HPD_6:
848                                 WREG32(DC_HPD6_CONTROL, 0);
849                                 rdev->irq.hpd[5] = false;
850                                 break;
851                         default:
852                                 break;
853                         }
854                 }
855         } else {
856                 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
857                         struct radeon_connector *radeon_connector = to_radeon_connector(connector);
858                         switch (radeon_connector->hpd.hpd) {
859                         case RADEON_HPD_1:
860                                 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
861                                 rdev->irq.hpd[0] = false;
862                                 break;
863                         case RADEON_HPD_2:
864                                 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
865                                 rdev->irq.hpd[1] = false;
866                                 break;
867                         case RADEON_HPD_3:
868                                 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
869                                 rdev->irq.hpd[2] = false;
870                                 break;
871                         default:
872                                 break;
873                         }
874                 }
875         }
876 }
877
878 /*
879  * R600 PCIE GART
880  */
/* Invalidate the VM TLB for the whole GTT range so the GPU picks up
 * freshly written page-table entries, after flushing the HDP cache so
 * those entries have actually reached VRAM. Polls for completion up to
 * rdev->usec_timeout microseconds; times out silently.
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* request invalidation of the full GTT aperture (4KB page units) */
	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* poll the invalidation response field */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		/* response type 2 reports a failed flush */
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		/* any other non-zero response means the flush completed */
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
906
907 int r600_pcie_gart_init(struct radeon_device *rdev)
908 {
909         int r;
910
911         if (rdev->gart.table.vram.robj) {
912                 WARN(1, "R600 PCIE GART already initialized.\n");
913                 return 0;
914         }
915         /* Initialize common gart structure */
916         r = radeon_gart_init(rdev);
917         if (r)
918                 return r;
919         rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
920         return radeon_gart_table_vram_alloc(rdev);
921 }
922
/* Enable the PCIE GART: pin the VRAM-backed page table, program the
 * VM L2 cache and per-client L1 TLBs, point VM context 0 at the GTT
 * aperture, then flush the TLB and mark the gart ready.
 * Returns 0 on success or a negative errno.
 */
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	/* pin the page table in VRAM and rewrite its entries */
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	/* same L1 TLB setup for every memory client block (SYS/HDP/MCD/
	 * GFX/PDMA/SEM), with per-client extra bits where written below */
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	/* VM context 0 covers the GTT aperture; addresses in 4KB units */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* out-of-range accesses are redirected to the dummy page */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	/* disable VM contexts 1-6 */
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
976
/* Disable the PCIE GART: turn off every VM context, shut down the L2
 * cache and program a minimal L1 TLB setup (no ENABLE_L1_TLB bit),
 * then unmap and unpin the VRAM-backed page table object.
 */
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	/* apply to every memory client block */
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	/* unmap and unpin the page table if it was ever allocated */
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
1016
/* Tear down the PCIE GART: release the common gart structure, turn
 * off the VM hardware, then free the VRAM-backed page table.
 */
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
1023
/* Program the VM blocks for AGP operation: same L2 cache and L1 TLB
 * setup as the PCIE GART path, but all seven VM page-table contexts
 * are left disabled (no GPU page table is used).
 */
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	/* same L1 TLB setup for every memory client block */
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	/* disable all VM contexts */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
1057
1058 int r600_mc_wait_for_idle(struct radeon_device *rdev)
1059 {
1060         unsigned i;
1061         u32 tmp;
1062
1063         for (i = 0; i < rdev->usec_timeout; i++) {
1064                 /* read MC_STATUS */
1065                 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
1066                 if (!tmp)
1067                         return 0;
1068                 udelay(1);
1069         }
1070         return -1;
1071 }
1072
/* Program the memory controller apertures (system/VRAM/AGP) to match
 * the layout the driver picked. MC clients are stopped around the
 * update (rv515_mc_stop/resume) and the MC is polled for idle before
 * and after reprogramming.
 */
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration: the system aperture must span both VRAM
	 * and the AGP/GTT range, whichever order they sit in (4KB units) */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	/* FB location: top in bits 31:16, base in bits 15:0 (16MB units) */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		/* AGP aperture registers take 4MB units */
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		/* no AGP: program an empty/parked AGP aperture */
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
1138
1139 /**
1140  * r600_vram_gtt_location - try to find VRAM & GTT location
1141  * @rdev: radeon device structure holding all necessary informations
1142  * @mc: memory controller structure holding memory informations
1143  *
 * This function tries to place VRAM at the same address in the GPU's
 * address space as it occupies in the CPU (PCI) address space, as some
 * GPUs seem to have issues when VRAM is reprogrammed to a different
 * address range.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
 * need them to form one contiguous range from the GPU's point of view so
 * that we can program the GPU to catch accesses outside of both.
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
1156  *
1157  * Note: GTT start, end, size should be initialized before calling this
1158  * function on AGP platform.
1159  */
1160 void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1161 {
1162         u64 size_bf, size_af;
1163
1164         if (mc->mc_vram_size > 0xE0000000) {
1165                 /* leave room for at least 512M GTT */
1166                 dev_warn(rdev->dev, "limiting VRAM\n");
1167                 mc->real_vram_size = 0xE0000000;
1168                 mc->mc_vram_size = 0xE0000000;
1169         }
1170         if (rdev->flags & RADEON_IS_AGP) {
1171                 size_bf = mc->gtt_start;
1172                 size_af = 0xFFFFFFFF - mc->gtt_end + 1;
1173                 if (size_bf > size_af) {
1174                         if (mc->mc_vram_size > size_bf) {
1175                                 dev_warn(rdev->dev, "limiting VRAM\n");
1176                                 mc->real_vram_size = size_bf;
1177                                 mc->mc_vram_size = size_bf;
1178                         }
1179                         mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1180                 } else {
1181                         if (mc->mc_vram_size > size_af) {
1182                                 dev_warn(rdev->dev, "limiting VRAM\n");
1183                                 mc->real_vram_size = size_af;
1184                                 mc->mc_vram_size = size_af;
1185                         }
1186                         mc->vram_start = mc->gtt_end;
1187                 }
1188                 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1189                 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1190                                 mc->mc_vram_size >> 20, mc->vram_start,
1191                                 mc->vram_end, mc->real_vram_size >> 20);
1192         } else {
1193                 u64 base = 0;
1194                 if (rdev->flags & RADEON_IS_IGP)
1195                         base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
1196                 radeon_vram_location(rdev, &rdev->mc, base);
1197                 rdev->mc.gtt_base_align = 0;
1198                 radeon_gtt_location(rdev, mc);
1199         }
1200 }
1201
1202 int r600_mc_init(struct radeon_device *rdev)
1203 {
1204         u32 tmp;
1205         int chansize, numchan;
1206
1207         /* Get VRAM informations */
1208         rdev->mc.vram_is_ddr = true;
1209         tmp = RREG32(RAMCFG);
1210         if (tmp & CHANSIZE_OVERRIDE) {
1211                 chansize = 16;
1212         } else if (tmp & CHANSIZE_MASK) {
1213                 chansize = 64;
1214         } else {
1215                 chansize = 32;
1216         }
1217         tmp = RREG32(CHMAP);
1218         switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1219         case 0:
1220         default:
1221                 numchan = 1;
1222                 break;
1223         case 1:
1224                 numchan = 2;
1225                 break;
1226         case 2:
1227                 numchan = 4;
1228                 break;
1229         case 3:
1230                 numchan = 8;
1231                 break;
1232         }
1233         rdev->mc.vram_width = numchan * chansize;
1234         /* Could aper size report 0 ? */
1235         rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1236         rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1237         /* Setup GPU memory space */
1238         rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1239         rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1240         rdev->mc.visible_vram_size = rdev->mc.aper_size;
1241         r600_vram_gtt_location(rdev, &rdev->mc);
1242
1243         if (rdev->flags & RADEON_IS_IGP) {
1244                 rs690_pm_info(rdev);
1245                 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1246         }
1247         radeon_update_bandwidth_info(rdev);
1248         return 0;
1249 }
1250
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	/* Busy bits in GRBM_STATUS covering all rendering blocks. */
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	/* Busy bits in GRBM_STATUS2 (per-unit SPI/TA/DB/CB instances). */
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	/* Dump status registers before the reset for debugging. */
	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	/* Stop the memory controller before resetting the GPU blocks. */
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		/* Assert the soft reset, read back to flush the write,
		 * hold briefly, then deassert. */
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	/* Dump status registers again so the post-reset state can be
	 * compared against the pre-reset dump above. */
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	/* Restart the memory controller that was stopped above. */
	rv515_mc_resume(rdev, &save);
	return 0;
}
1329
/* Detect whether the GPU is locked up by checking GUI activity and,
 * if active, whether the CP read pointer still advances. */
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	/* NOTE(review): srbm_status and grbm_status2 are read but never
	 * examined below — presumably kept for debugging or the MMIO read
	 * itself; confirm before removing. */
	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		/* GUI idle: not locked up; refresh the lockup tracker.
		 * NOTE(review): uses rdev->config.r300.lockup on an r600
		 * part — presumably config is a union so the field is
		 * shared storage; verify against radeon.h. */
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	/* Sample the current CP read pointer for the lockup heuristic. */
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
1355
/* ASIC-level reset entry point: on r600 this is just a GPU soft reset. */
int r600_asic_reset(struct radeon_device *rdev)
{
	int ret;

	ret = r600_gpu_soft_reset(rdev);
	return ret;
}
1360
1361 static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1362                                              u32 num_backends,
1363                                              u32 backend_disable_mask)
1364 {
1365         u32 backend_map = 0;
1366         u32 enabled_backends_mask;
1367         u32 enabled_backends_count;
1368         u32 cur_pipe;
1369         u32 swizzle_pipe[R6XX_MAX_PIPES];
1370         u32 cur_backend;
1371         u32 i;
1372
1373         if (num_tile_pipes > R6XX_MAX_PIPES)
1374                 num_tile_pipes = R6XX_MAX_PIPES;
1375         if (num_tile_pipes < 1)
1376                 num_tile_pipes = 1;
1377         if (num_backends > R6XX_MAX_BACKENDS)
1378                 num_backends = R6XX_MAX_BACKENDS;
1379         if (num_backends < 1)
1380                 num_backends = 1;
1381
1382         enabled_backends_mask = 0;
1383         enabled_backends_count = 0;
1384         for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
1385                 if (((backend_disable_mask >> i) & 1) == 0) {
1386                         enabled_backends_mask |= (1 << i);
1387                         ++enabled_backends_count;
1388                 }
1389                 if (enabled_backends_count == num_backends)
1390                         break;
1391         }
1392
1393         if (enabled_backends_count == 0) {
1394                 enabled_backends_mask = 1;
1395                 enabled_backends_count = 1;
1396         }
1397
1398         if (enabled_backends_count != num_backends)
1399                 num_backends = enabled_backends_count;
1400
1401         memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1402         switch (num_tile_pipes) {
1403         case 1:
1404                 swizzle_pipe[0] = 0;
1405                 break;
1406         case 2:
1407                 swizzle_pipe[0] = 0;
1408                 swizzle_pipe[1] = 1;
1409                 break;
1410         case 3:
1411                 swizzle_pipe[0] = 0;
1412                 swizzle_pipe[1] = 1;
1413                 swizzle_pipe[2] = 2;
1414                 break;
1415         case 4:
1416                 swizzle_pipe[0] = 0;
1417                 swizzle_pipe[1] = 1;
1418                 swizzle_pipe[2] = 2;
1419                 swizzle_pipe[3] = 3;
1420                 break;
1421         case 5:
1422                 swizzle_pipe[0] = 0;
1423                 swizzle_pipe[1] = 1;
1424                 swizzle_pipe[2] = 2;
1425                 swizzle_pipe[3] = 3;
1426                 swizzle_pipe[4] = 4;
1427                 break;
1428         case 6:
1429                 swizzle_pipe[0] = 0;
1430                 swizzle_pipe[1] = 2;
1431                 swizzle_pipe[2] = 4;
1432                 swizzle_pipe[3] = 5;
1433                 swizzle_pipe[4] = 1;
1434                 swizzle_pipe[5] = 3;
1435                 break;
1436         case 7:
1437                 swizzle_pipe[0] = 0;
1438                 swizzle_pipe[1] = 2;
1439                 swizzle_pipe[2] = 4;
1440                 swizzle_pipe[3] = 6;
1441                 swizzle_pipe[4] = 1;
1442                 swizzle_pipe[5] = 3;
1443                 swizzle_pipe[6] = 5;
1444                 break;
1445         case 8:
1446                 swizzle_pipe[0] = 0;
1447                 swizzle_pipe[1] = 2;
1448                 swizzle_pipe[2] = 4;
1449                 swizzle_pipe[3] = 6;
1450                 swizzle_pipe[4] = 1;
1451                 swizzle_pipe[5] = 3;
1452                 swizzle_pipe[6] = 5;
1453                 swizzle_pipe[7] = 7;
1454                 break;
1455         }
1456
1457         cur_backend = 0;
1458         for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1459                 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1460                         cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1461
1462                 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
1463
1464                 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1465         }
1466
1467         return backend_map;
1468 }
1469
/* Return the number of set bits in val (population count). */
int r600_count_pipe_bits(uint32_t val)
{
	int count = 0;

	/* Kernighan's trick: each iteration clears the lowest set bit. */
	while (val) {
		val &= val - 1;
		count++;
	}
	return count;
}
1480
/* r600_gpu_init - program the static GPU configuration state.
 *
 * Fills rdev->config.r600 with per-family hardware limits, then programs
 * tiling, pipe/backend configuration, SQ GPR/thread/stack resource splits
 * and assorted default render state.  The register write order follows
 * the hardware bring-up sequence and should not be rearranged.
 */
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	/* Per-family hardware limits (pipes, SIMDs, GPRs, threads, ...). */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	/* Encode the pipe count (log2) into the tiling config. */
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	rdev->config.r600.tiling_group_size = 256;
	/* Row tiling and sample split are derived from the RAM row count,
	 * capped at 3. */
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	/* Build the disable masks: everything beyond max_backends /
	 * max_pipes / max_simds is marked inactive. */
	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));
	/* tile_config is captured before the backend map bits are OR'd in,
	 * so the stored value excludes BACKEND_MAP. */
	rdev->config.r600.tile_config = tiling_config;
	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	/* VGT deallocation/reuse depths are sized from the active pipe
	 * count. */
	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	/* FIFO watermarks differ between the small (RV610-class) parts
	 * and R600/RV630. */
	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	/* Per-family GPR/thread/stack split between the PS/VS/GS/ES
	 * shader stages. */
	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	/* Parts without a vertex cache invalidate the texture cache only. */
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	/* GS-per-ES depends on pipe count plus a per-family bonus,
	 * capped at 256. */
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	/* NOTE(review): CB_COLOR7_FRAG is also cleared a few lines above;
	 * this second write looks redundant — confirm before removing. */
	WREG32(CB_COLOR7_FRAG, 0);

	/* Texture cache L2 sizing per family. */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	/* Read-modify-write with no modification: rewrites the current
	 * HDP host path control value. */
	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
1882
1883
1884 /*
1885  * Indirect registers accessor
1886  */
/* Read a PCIE port register through the indirect index/data pair.
 * The index write is flushed with a readback before the data read;
 * this ordering must not be changed.
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);	/* flush the posted index write */
	r = RREG32(PCIE_PORT_DATA);
	return r;
}
1896
/* Write a PCIE port register through the indirect index/data pair.
 * Both MMIO writes are flushed with readbacks to guarantee ordering.
 */
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);	/* flush the posted index write */
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);	/* flush the posted data write */
}
1904
1905 /*
1906  * CP & Ring
1907  */
/* Halt the CP micro engine so no further ring commands are fetched. */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
1912
1913 int r600_init_microcode(struct radeon_device *rdev)
1914 {
1915         struct platform_device *pdev;
1916         const char *chip_name;
1917         const char *rlc_chip_name;
1918         size_t pfp_req_size, me_req_size, rlc_req_size;
1919         char fw_name[30];
1920         int err;
1921
1922         DRM_DEBUG("\n");
1923
1924         pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1925         err = IS_ERR(pdev);
1926         if (err) {
1927                 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1928                 return -EINVAL;
1929         }
1930
1931         switch (rdev->family) {
1932         case CHIP_R600:
1933                 chip_name = "R600";
1934                 rlc_chip_name = "R600";
1935                 break;
1936         case CHIP_RV610:
1937                 chip_name = "RV610";
1938                 rlc_chip_name = "R600";
1939                 break;
1940         case CHIP_RV630:
1941                 chip_name = "RV630";
1942                 rlc_chip_name = "R600";
1943                 break;
1944         case CHIP_RV620:
1945                 chip_name = "RV620";
1946                 rlc_chip_name = "R600";
1947                 break;
1948         case CHIP_RV635:
1949                 chip_name = "RV635";
1950                 rlc_chip_name = "R600";
1951                 break;
1952         case CHIP_RV670:
1953                 chip_name = "RV670";
1954                 rlc_chip_name = "R600";
1955                 break;
1956         case CHIP_RS780:
1957         case CHIP_RS880:
1958                 chip_name = "RS780";
1959                 rlc_chip_name = "R600";
1960                 break;
1961         case CHIP_RV770:
1962                 chip_name = "RV770";
1963                 rlc_chip_name = "R700";
1964                 break;
1965         case CHIP_RV730:
1966         case CHIP_RV740:
1967                 chip_name = "RV730";
1968                 rlc_chip_name = "R700";
1969                 break;
1970         case CHIP_RV710:
1971                 chip_name = "RV710";
1972                 rlc_chip_name = "R700";
1973                 break;
1974         case CHIP_CEDAR:
1975                 chip_name = "CEDAR";
1976                 rlc_chip_name = "CEDAR";
1977                 break;
1978         case CHIP_REDWOOD:
1979                 chip_name = "REDWOOD";
1980                 rlc_chip_name = "REDWOOD";
1981                 break;
1982         case CHIP_JUNIPER:
1983                 chip_name = "JUNIPER";
1984                 rlc_chip_name = "JUNIPER";
1985                 break;
1986         case CHIP_CYPRESS:
1987         case CHIP_HEMLOCK:
1988                 chip_name = "CYPRESS";
1989                 rlc_chip_name = "CYPRESS";
1990                 break;
1991         default: BUG();
1992         }
1993
1994         if (rdev->family >= CHIP_CEDAR) {
1995                 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1996                 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1997                 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1998         } else if (rdev->family >= CHIP_RV770) {
1999                 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2000                 me_req_size = R700_PM4_UCODE_SIZE * 4;
2001                 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2002         } else {
2003                 pfp_req_size = PFP_UCODE_SIZE * 4;
2004                 me_req_size = PM4_UCODE_SIZE * 12;
2005                 rlc_req_size = RLC_UCODE_SIZE * 4;
2006         }
2007
2008         DRM_INFO("Loading %s Microcode\n", chip_name);
2009
2010         snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2011         err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2012         if (err)
2013                 goto out;
2014         if (rdev->pfp_fw->size != pfp_req_size) {
2015                 printk(KERN_ERR
2016                        "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2017                        rdev->pfp_fw->size, fw_name);
2018                 err = -EINVAL;
2019                 goto out;
2020         }
2021
2022         snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2023         err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2024         if (err)
2025                 goto out;
2026         if (rdev->me_fw->size != me_req_size) {
2027                 printk(KERN_ERR
2028                        "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2029                        rdev->me_fw->size, fw_name);
2030                 err = -EINVAL;
2031         }
2032
2033         snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2034         err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2035         if (err)
2036                 goto out;
2037         if (rdev->rlc_fw->size != rlc_req_size) {
2038                 printk(KERN_ERR
2039                        "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2040                        rdev->rlc_fw->size, fw_name);
2041                 err = -EINVAL;
2042         }
2043
2044 out:
2045         platform_device_unregister(pdev);
2046
2047         if (err) {
2048                 if (err != -EINVAL)
2049                         printk(KERN_ERR
2050                                "r600_cp: Failed to load firmware \"%s\"\n",
2051                                fw_name);
2052                 release_firmware(rdev->pfp_fw);
2053                 rdev->pfp_fw = NULL;
2054                 release_firmware(rdev->me_fw);
2055                 rdev->me_fw = NULL;
2056                 release_firmware(rdev->rlc_fw);
2057                 rdev->rlc_fw = NULL;
2058         }
2059         return err;
2060 }
2061
/* Upload the ME and PFP microcode images into the CP after halting
 * and soft-resetting it.  The images must already have been fetched
 * by r600_init_microcode().
 *
 * Returns 0 on success, -EINVAL if the firmware is not loaded.
 */
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	/* firmware blobs are stored big-endian; convert while writing */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	/* rewind the ucode address/read/write pointers */
	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
2099
/* Emit the ME_INITIALIZE packet that brings the CP micro engine
 * online, then un-halt it.  The packet payload differs per family
 * (flags dword and max hardware context count).
 *
 * Returns 0 on success or a negative error from the ring lock.
 */
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_CEDAR) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	} else if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	/* release the micro engine (counterpart of r600_cp_stop) */
	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
2131
/* (Re)start the CP: soft-reset it, program the ring buffer registers
 * (size, read/write pointers, base address), start the micro engine
 * via r600_cp_start() and verify everything with a ring test.
 *
 * Returns 0 on success or a negative error code.
 */
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	/* resync the driver's copies of the ring pointers */
	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
2180
/* Publish queued commands to the CP by updating the ring write
 * pointer; the readback flushes the posted MMIO write.
 */
void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}
2186
2187 void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2188 {
2189         u32 rb_bufsz;
2190
2191         /* Align ring size */
2192         rb_bufsz = drm_order(ring_size / 8);
2193         ring_size = (1 << (rb_bufsz + 1)) * 4;
2194         rdev->cp.ring_size = ring_size;
2195         rdev->cp.align_mask = 16 - 1;
2196 }
2197
/* Stop the CP and tear down its ring buffer. */
void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}
2203
2204
2205 /*
2206  * GPU scratch registers helpers function.
2207  */
2208 void r600_scratch_init(struct radeon_device *rdev)
2209 {
2210         int i;
2211
2212         rdev->scratch.num_reg = 7;
2213         for (i = 0; i < rdev->scratch.num_reg; i++) {
2214                 rdev->scratch.free[i] = true;
2215                 rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
2216         }
2217 }
2218
/* Ring sanity check: preload a scratch register with 0xCAFEDEAD,
 * submit a ring packet that writes 0xDEADBEEF to it, and poll until
 * the new value lands or the usec timeout expires.
 *
 * Returns 0 on success or a negative error code.
 */
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	/* poll for the GPU write to become visible */
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
2258
/* Disable scratch register writeback and unmap/unpin the writeback
 * buffer object.  The BO itself is kept; r600_wb_fini() releases it.
 */
void r600_wb_disable(struct radeon_device *rdev)
{
	int r;

	WREG32(SCRATCH_UMSK, 0);	/* mask all scratch writebacks */
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
}
2273
2274 void r600_wb_fini(struct radeon_device *rdev)
2275 {
2276         r600_wb_disable(rdev);
2277         if (rdev->wb.wb_obj) {
2278                 radeon_bo_unref(&rdev->wb.wb_obj);
2279                 rdev->wb.wb = NULL;
2280                 rdev->wb.wb_obj = NULL;
2281         }
2282 }
2283
/* Enable writeback of scratch registers and the CP read pointer into
 * a GTT buffer object, allocating, pinning and mapping the BO on
 * first use.  Any setup failure tears the BO down again.
 *
 * Returns 0 on success or a negative error code.
 */
int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
	}
	/* scratch writeback at offset 0, CP rptr writeback at offset 1024 */
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);	/* enable writeback for all scratch regs */
	return 0;
}
2322
/* Emit the packet sequence that signals a fence: flush/invalidate
 * caches, wait for 3D idle, write the fence sequence number into the
 * fence scratch register, then raise a ring buffer interrupt.
 */
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}
2342
/* GPU copy using the 3D blitter.  The blit mutex serializes access to
 * the shared blitter state; on prepare failure the scratch vertex
 * buffer IB (if any) is released before bailing out.
 *
 * Returns 0 on success or a negative error code.
 */
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}
2363
/* Surface register programming is not implemented on r6xx; report
 * success so callers can proceed.
 */
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}
2371
/* Surface register clearing is not implemented on r6xx; no-op stub. */
void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
2376
2377
2378 bool r600_card_posted(struct radeon_device *rdev)
2379 {
2380         uint32_t reg;
2381
2382         /* first check CRTCs */
2383         reg = RREG32(D1CRTC_CONTROL) |
2384                 RREG32(D2CRTC_CONTROL);
2385         if (reg & CRTC_EN)
2386                 return true;
2387
2388         /* then check MEM_SIZE, in case the crtcs are off */
2389         if (RREG32(CONFIG_MEMSIZE))
2390                 return true;
2391
2392         return false;
2393 }
2394
/* Common hardware bring-up shared by init and resume: microcode,
 * memory controller, GART/AGP, GPU state, blitter, interrupts, CP and
 * writeback.
 *
 * Returns 0 on success or a negative error code.
 */
int r600_startup(struct radeon_device *rdev)
{
	int r;

	/* load microcode on first use; it is kept across suspend */
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	/* blitter failure is non-fatal: fall back to CPU copies */
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				&rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}
	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* write back buffer are not vital so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}
2457
2458 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2459 {
2460         uint32_t temp;
2461
2462         temp = RREG32(CONFIG_CNTL);
2463         if (state == false) {
2464                 temp &= ~(1<<0);
2465                 temp |= (1<<1);
2466         } else {
2467                 temp &= ~(1<<1);
2468         }
2469         WREG32(CONFIG_CNTL, temp);
2470 }
2471
2472 int r600_resume(struct radeon_device *rdev)
2473 {
2474         int r;
2475
2476         /* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
2477          * posting will perform necessary task to bring back GPU into good
2478          * shape.
2479          */
2480         /* post card */
2481         atom_asic_init(rdev->mode_info.atom_context);
2482         /* Initialize clocks */
2483         r = radeon_clocks_init(rdev);
2484         if (r) {
2485                 return r;
2486         }
2487
2488         r = r600_startup(rdev);
2489         if (r) {
2490                 DRM_ERROR("r600 startup failed on resume\n");
2491                 return r;
2492         }
2493
2494         r = r600_ib_test(rdev);
2495         if (r) {
2496                 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
2497                 return r;
2498         }
2499
2500         r = r600_audio_init(rdev);
2501         if (r) {
2502                 DRM_ERROR("radeon: audio resume failed\n");
2503                 return r;
2504         }
2505
2506         return r;
2507 }
2508
/* Quiesce the GPU for suspend: stop audio and the CP, suspend
 * interrupts, disable writeback and GART, and unpin the blit shader.
 *
 * Always returns 0.
 */
int r600_suspend(struct radeon_device *rdev)
{
	int r;

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
	return 0;
}
2530
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more
 * than call ASIC-specific functions.  This should also allow us
 * to remove a number of callback functions such as vram_info.
 */
/* Full one-time device initialization for r6xx: BIOS discovery and
 * POST, scratch/surface/clock setup, fence driver, memory controller,
 * memory manager, IRQs, CP and IH rings, GART and acceleration
 * bring-up.  Acceleration failures only clear rdev->accel_working
 * instead of failing the whole init.
 *
 * Returns 0 on success or a negative error code for fatal errors.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This don't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* AGP init failure is recoverable: fall back to PCI(e) GART */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	/* startup / IB failures disable acceleration but are not fatal */
	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_wb_fini(rdev);
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}
2643
/* Undo r600_init(): tear down acceleration, interrupt, memory and
 * BIOS-related state in roughly the reverse order of initialization.
 */
void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_wb_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}
2663
2664
2665 /*
2666  * CS stuff
2667  */
/* Schedule an indirect buffer on the CP ring via an INDIRECT_BUFFER
 * packet: low dword of the (16-byte aligned) GPU address, high bits,
 * then the IB length in dwords.
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}
2676
2677 int r600_ib_test(struct radeon_device *rdev)
2678 {
2679         struct radeon_ib *ib;
2680         uint32_t scratch;
2681         uint32_t tmp = 0;
2682         unsigned i;
2683         int r;
2684
2685         r = radeon_scratch_get(rdev, &scratch);
2686         if (r) {
2687                 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2688                 return r;
2689         }
2690         WREG32(scratch, 0xCAFEDEAD);
2691         r = radeon_ib_get(rdev, &ib);
2692         if (r) {
2693                 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2694                 return r;
2695         }
2696         ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2697         ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2698         ib->ptr[2] = 0xDEADBEEF;
2699         ib->ptr[3] = PACKET2(0);
2700         ib->ptr[4] = PACKET2(0);
2701         ib->ptr[5] = PACKET2(0);
2702         ib->ptr[6] = PACKET2(0);
2703         ib->ptr[7] = PACKET2(0);
2704         ib->ptr[8] = PACKET2(0);
2705         ib->ptr[9] = PACKET2(0);
2706         ib->ptr[10] = PACKET2(0);
2707         ib->ptr[11] = PACKET2(0);
2708         ib->ptr[12] = PACKET2(0);
2709         ib->ptr[13] = PACKET2(0);
2710         ib->ptr[14] = PACKET2(0);
2711         ib->ptr[15] = PACKET2(0);
2712         ib->length_dw = 16;
2713         r = radeon_ib_schedule(rdev, ib);
2714         if (r) {
2715                 radeon_scratch_free(rdev, scratch);
2716                 radeon_ib_free(rdev, &ib);
2717                 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2718                 return r;
2719         }
2720         r = radeon_fence_wait(ib->fence, false);
2721         if (r) {
2722                 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2723                 return r;
2724         }
2725         for (i = 0; i < rdev->usec_timeout; i++) {
2726                 tmp = RREG32(scratch);
2727                 if (tmp == 0xDEADBEEF)
2728                         break;
2729                 DRM_UDELAY(1);
2730         }
2731         if (i < rdev->usec_timeout) {
2732                 DRM_INFO("ib test succeeded in %u usecs\n", i);
2733         } else {
2734                 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
2735                           scratch, tmp);
2736                 r = -EINVAL;
2737         }
2738         radeon_scratch_free(rdev, scratch);
2739         radeon_ib_free(rdev, &ib);
2740         return r;
2741 }
2742
2743 /*
2744  * Interrupts
2745  *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty much
 * the same as the CP ring buffer, but in reverse.  Rather than the CPU
2748  * writing to the ring and the GPU consuming, the GPU writes to the ring
2749  * and host consumes.  As the host irq handler processes interrupts, it
2750  * increments the rptr.  When the rptr catches up with the wptr, all the
2751  * current interrupts have been processed.
2752  */
2753
2754 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2755 {
2756         u32 rb_bufsz;
2757
2758         /* Align ring size */
2759         rb_bufsz = drm_order(ring_size / 4);
2760         ring_size = (1 << rb_bufsz) * 4;
2761         rdev->ih.ring_size = ring_size;
2762         rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2763         rdev->ih.rptr = 0;
2764 }
2765
/* Allocate, pin and CPU-map the IH ring buffer in GTT.
 * Idempotent: does nothing if the ring object already exists.
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): if pinning or mapping fails, ring_obj is left non-NULL
 * in a partially-initialized state, so a retry would skip allocation
 * entirely — presumably the caller tears down via r600_ih_ring_fini()
 * on error; verify against callers.
 */
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		/* reserve the BO before pin/map, as the BO API requires */
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		/* pin in GTT so the GPU address stays valid for the IH block */
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		/* map for CPU access; the host consumes IH entries directly */
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
2801
2802 static void r600_ih_ring_fini(struct radeon_device *rdev)
2803 {
2804         int r;
2805         if (rdev->ih.ring_obj) {
2806                 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2807                 if (likely(r == 0)) {
2808                         radeon_bo_kunmap(rdev->ih.ring_obj);
2809                         radeon_bo_unpin(rdev->ih.ring_obj);
2810                         radeon_bo_unreserve(rdev->ih.ring_obj);
2811                 }
2812                 radeon_bo_unref(&rdev->ih.ring_obj);
2813                 rdev->ih.ring = NULL;
2814                 rdev->ih.ring_obj = NULL;
2815         }
2816 }
2817
/* Halt the RLC microengine by clearing RLC_CNTL.
 * r7xx parts (RV770..RV740) additionally require an SRBM soft reset of
 * the RLC beforehand.
 */
void r600_rlc_stop(struct radeon_device *rdev)
{

	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);	/* read back to post the write */
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);	/* post the de-assert too */
	}

	WREG32(RLC_CNTL, 0);
}
2833
/* (Re-)enable the RLC microengine, e.g. after r600_rlc_stop() and a
 * microcode upload in r600_rlc_init().
 */
static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
2838
2839 static int r600_rlc_init(struct radeon_device *rdev)
2840 {
2841         u32 i;
2842         const __be32 *fw_data;
2843
2844         if (!rdev->rlc_fw)
2845                 return -EINVAL;
2846
2847         r600_rlc_stop(rdev);
2848
2849         WREG32(RLC_HB_BASE, 0);
2850         WREG32(RLC_HB_CNTL, 0);
2851         WREG32(RLC_HB_RPTR, 0);
2852         WREG32(RLC_HB_WPTR, 0);
2853         WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2854         WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2855         WREG32(RLC_MC_CNTL, 0);
2856         WREG32(RLC_UCODE_CNTL, 0);
2857
2858         fw_data = (const __be32 *)rdev->rlc_fw->data;
2859         if (rdev->family >= CHIP_CEDAR) {
2860                 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2861                         WREG32(RLC_UCODE_ADDR, i);
2862                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2863                 }
2864         } else if (rdev->family >= CHIP_RV770) {
2865                 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2866                         WREG32(RLC_UCODE_ADDR, i);
2867                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2868                 }
2869         } else {
2870                 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2871                         WREG32(RLC_UCODE_ADDR, i);
2872                         WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2873                 }
2874         }
2875         WREG32(RLC_UCODE_ADDR, 0);
2876
2877         r600_rlc_start(rdev);
2878
2879         return 0;
2880 }
2881
2882 static void r600_enable_interrupts(struct radeon_device *rdev)
2883 {
2884         u32 ih_cntl = RREG32(IH_CNTL);
2885         u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2886
2887         ih_cntl |= ENABLE_INTR;
2888         ih_rb_cntl |= IH_RB_ENABLE;
2889         WREG32(IH_CNTL, ih_cntl);
2890         WREG32(IH_RB_CNTL, ih_rb_cntl);
2891         rdev->ih.enabled = true;
2892 }
2893
2894 void r600_disable_interrupts(struct radeon_device *rdev)
2895 {
2896         u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2897         u32 ih_cntl = RREG32(IH_CNTL);
2898
2899         ih_rb_cntl &= ~IH_RB_ENABLE;
2900         ih_cntl &= ~ENABLE_INTR;
2901         WREG32(IH_RB_CNTL, ih_rb_cntl);
2902         WREG32(IH_CNTL, ih_cntl);
2903         /* set rptr, wptr to 0 */
2904         WREG32(IH_RB_RPTR, 0);
2905         WREG32(IH_RB_WPTR, 0);
2906         rdev->ih.enabled = false;
2907         rdev->ih.wptr = 0;
2908         rdev->ih.rptr = 0;
2909 }
2910
2911 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2912 {
2913         u32 tmp;
2914
2915         WREG32(CP_INT_CNTL, 0);
2916         WREG32(GRBM_INT_CNTL, 0);
2917         WREG32(DxMODE_INT_MASK, 0);
2918         if (ASIC_IS_DCE3(rdev)) {
2919                 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2920                 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2921                 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2922                 WREG32(DC_HPD1_INT_CONTROL, tmp);
2923                 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2924                 WREG32(DC_HPD2_INT_CONTROL, tmp);
2925                 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2926                 WREG32(DC_HPD3_INT_CONTROL, tmp);
2927                 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2928                 WREG32(DC_HPD4_INT_CONTROL, tmp);
2929                 if (ASIC_IS_DCE32(rdev)) {
2930                         tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2931                         WREG32(DC_HPD5_INT_CONTROL, tmp);
2932                         tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2933                         WREG32(DC_HPD6_INT_CONTROL, tmp);
2934                 }
2935         } else {
2936                 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2937                 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2938                 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2939                 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2940                 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2941                 WREG32(DC_HOT_PLUG_