/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

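	/* ASIC_T is a signed 9-bit value; sign-extend readings below zero */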
	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

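/* pick the requested power state and clock mode for the planned
 * dynamic PM action (minimum, downclock, upclock, default)
 */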
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

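/* return the index of the instance'th power state of the given type,
 * or the default power state index if no match is found
 */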
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

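/* build the PM profile table; the power state indices used depend on how
 * many power states the BIOS exposes
 */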
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

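/* enable the HPD pins and interrupt sources used by the connectors present
 * on this board
 */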
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
		u32 tmp;

		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards, so just use
		 * the old method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read the invalidation response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC busy bits from SRBM_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

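/* program the memory controller's VRAM and AGP apertures while MC clients
 * and display access are stopped
 */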
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address as it appears in
 * the CPU (PCI) address space, as some GPUs seem to have issues when it is
 * reprogrammed to a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case we limit VRAM or GTT.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 */
Alex Deucher0ef0c1f2010-11-22 17:56:26 -05001175static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
Jerome Glissed594e462010-02-17 21:54:29 +00001176{
1177 u64 size_bf, size_af;
1178
1179 if (mc->mc_vram_size > 0xE0000000) {
1180 /* leave room for at least 512M GTT */
1181 dev_warn(rdev->dev, "limiting VRAM\n");
1182 mc->real_vram_size = 0xE0000000;
1183 mc->mc_vram_size = 0xE0000000;
1184 }
1185 if (rdev->flags & RADEON_IS_AGP) {
1186 size_bf = mc->gtt_start;
1187 size_af = 0xFFFFFFFF - mc->gtt_end + 1;
1188 if (size_bf > size_af) {
1189 if (mc->mc_vram_size > size_bf) {
1190 dev_warn(rdev->dev, "limiting VRAM\n");
1191 mc->real_vram_size = size_bf;
1192 mc->mc_vram_size = size_bf;
1193 }
1194 mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1195 } else {
1196 if (mc->mc_vram_size > size_af) {
1197 dev_warn(rdev->dev, "limiting VRAM\n");
1198 mc->real_vram_size = size_af;
1199 mc->mc_vram_size = size_af;
1200 }
1201 mc->vram_start = mc->gtt_end;
1202 }
1203 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1204 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1205 mc->mc_vram_size >> 20, mc->vram_start,
1206 mc->vram_end, mc->real_vram_size >> 20);
1207 } else {
1208 u64 base = 0;
Alex Deucher8961d522010-12-03 14:37:22 -05001209 if (rdev->flags & RADEON_IS_IGP) {
1210 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1211 base <<= 24;
1212 }
Jerome Glissed594e462010-02-17 21:54:29 +00001213 radeon_vram_location(rdev, &rdev->mc, base);
Alex Deucher8d369bb2010-07-15 10:51:10 -04001214 rdev->mc.gtt_base_align = 0;
Jerome Glissed594e462010-02-17 21:54:29 +00001215 radeon_gtt_location(rdev, mc);
1216 }
1217}
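/* Worked example (hypothetical numbers, for illustration only): on an AGP
 * board with gtt_start = 0x80000000, gtt_end = 0x9FFFFFFF and
 * mc_vram_size = 0x40000000 (1GB VRAM), size_bf = 0x80000000 and
 * size_af = 0x60000001. size_bf is larger, so VRAM is placed just before the
 * GTT aperture: vram_start = 0x80000000 - 0x40000000 = 0x40000000 and
 * vram_end = 0x7FFFFFFF, leaving the GPU with one contiguous
 * 0x40000000 - 0x9FFFFFFF range covering VRAM followed by GTT.
 */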
1218
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001219int r600_mc_init(struct radeon_device *rdev)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001220{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001221 u32 tmp;
Alex Deucher5885b7a2009-10-19 17:23:33 -04001222 int chansize, numchan;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001223
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001224 /* Get VRAM informations */
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001225 rdev->mc.vram_is_ddr = true;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001226 tmp = RREG32(RAMCFG);
1227 if (tmp & CHANSIZE_OVERRIDE) {
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001228 chansize = 16;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001229 } else if (tmp & CHANSIZE_MASK) {
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001230 chansize = 64;
1231 } else {
1232 chansize = 32;
1233 }
Alex Deucher5885b7a2009-10-19 17:23:33 -04001234 tmp = RREG32(CHMAP);
1235 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1236 case 0:
1237 default:
1238 numchan = 1;
1239 break;
1240 case 1:
1241 numchan = 2;
1242 break;
1243 case 2:
1244 numchan = 4;
1245 break;
1246 case 3:
1247 numchan = 8;
1248 break;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001249 }
Alex Deucher5885b7a2009-10-19 17:23:33 -04001250 rdev->mc.vram_width = numchan * chansize;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001251 /* Could aper size report 0 ? */
Jordan Crouse01d73a62010-05-27 13:40:24 -06001252 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1253 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001254 /* Setup GPU memory space */
1255 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1256 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
Jerome Glisse51e5fcd2010-02-19 14:33:54 +00001257 rdev->mc.visible_vram_size = rdev->mc.aper_size;
Jerome Glissec919b372010-08-10 17:41:31 -04001258 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
Jerome Glissed594e462010-02-17 21:54:29 +00001259 r600_vram_gtt_location(rdev, &rdev->mc);
Alex Deucherf47299c2010-03-16 20:54:38 -04001260
Alex Deucherf8920342010-06-30 12:02:03 -04001261 if (rdev->flags & RADEON_IS_IGP) {
1262 rs690_pm_info(rdev);
Alex Deucher06b64762010-01-05 11:27:29 -05001263 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
Alex Deucherf8920342010-06-30 12:02:03 -04001264 }
Alex Deucherf47299c2010-03-16 20:54:38 -04001265 radeon_update_bandwidth_info(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001266 return 0;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001267}
1268
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001269/* We don't check that the GPU really needs a reset; we simply do the
 1270 * reset. It's up to the caller to determine if the GPU needs one. We
 1271 * might add a helper function to check that.
1272 */
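/* Illustrative sketch only (not an excerpt from the actual callers): a caller
 * would typically pair its own lockup detection with the reset entry point
 * defined further below in this file, e.g.
 *
 *	if (r600_gpu_is_lockup(rdev))
 *		r600_asic_reset(rdev);
 */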
1273int r600_gpu_soft_reset(struct radeon_device *rdev)
1274{
Jerome Glissea3c19452009-10-01 18:02:13 +02001275 struct rv515_mc_save save;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001276 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
1277 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1278 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
1279 S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
1280 S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
1281 S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
1282 S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
1283 S_008010_GUI_ACTIVE(1);
1284 u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
1285 S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
1286 S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
1287 S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
1288 S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
1289 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
1290 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
1291 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
Jerome Glissea3c19452009-10-01 18:02:13 +02001292 u32 tmp;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001293
Alex Deucher8d96fe92011-01-21 15:38:22 +00001294 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1295 return 0;
1296
Jerome Glisse1a029b72009-10-06 19:04:30 +02001297	dev_info(rdev->dev, "GPU soft reset\n");
1298 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1299 RREG32(R_008010_GRBM_STATUS));
1300 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
Jerome Glissea3c19452009-10-01 18:02:13 +02001301 RREG32(R_008014_GRBM_STATUS2));
Jerome Glisse1a029b72009-10-06 19:04:30 +02001302 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1303 RREG32(R_000E50_SRBM_STATUS));
Jerome Glissea3c19452009-10-01 18:02:13 +02001304 rv515_mc_stop(rdev, &save);
1305 if (r600_mc_wait_for_idle(rdev)) {
 1306		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1307 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001308 /* Disable CP parsing/prefetching */
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001309 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001310 /* Check if any of the rendering block is busy and reset it */
1311 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1312 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
Jerome Glissea3c19452009-10-01 18:02:13 +02001313 tmp = S_008020_SOFT_RESET_CR(1) |
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001314 S_008020_SOFT_RESET_DB(1) |
1315 S_008020_SOFT_RESET_CB(1) |
1316 S_008020_SOFT_RESET_PA(1) |
1317 S_008020_SOFT_RESET_SC(1) |
1318 S_008020_SOFT_RESET_SMX(1) |
1319 S_008020_SOFT_RESET_SPI(1) |
1320 S_008020_SOFT_RESET_SX(1) |
1321 S_008020_SOFT_RESET_SH(1) |
1322 S_008020_SOFT_RESET_TC(1) |
1323 S_008020_SOFT_RESET_TA(1) |
1324 S_008020_SOFT_RESET_VC(1) |
Jerome Glissea3c19452009-10-01 18:02:13 +02001325 S_008020_SOFT_RESET_VGT(1);
Jerome Glisse1a029b72009-10-06 19:04:30 +02001326 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
Jerome Glissea3c19452009-10-01 18:02:13 +02001327 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001328 RREG32(R_008020_GRBM_SOFT_RESET);
1329 mdelay(15);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001330 WREG32(R_008020_GRBM_SOFT_RESET, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001331 }
1332 /* Reset CP (we always reset CP) */
Jerome Glissea3c19452009-10-01 18:02:13 +02001333 tmp = S_008020_SOFT_RESET_CP(1);
1334 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1335 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001336 RREG32(R_008020_GRBM_SOFT_RESET);
1337 mdelay(15);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001338 WREG32(R_008020_GRBM_SOFT_RESET, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001339 /* Wait a little for things to settle down */
Jerome Glisse225758d2010-03-09 14:45:10 +00001340 mdelay(1);
Jerome Glisse1a029b72009-10-06 19:04:30 +02001341 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1342 RREG32(R_008010_GRBM_STATUS));
1343 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
1344 RREG32(R_008014_GRBM_STATUS2));
1345 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1346 RREG32(R_000E50_SRBM_STATUS));
Jerome Glissea3c19452009-10-01 18:02:13 +02001347 rv515_mc_resume(rdev, &save);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001348 return 0;
1349}
1350
Jerome Glisse225758d2010-03-09 14:45:10 +00001351bool r600_gpu_is_lockup(struct radeon_device *rdev)
1352{
1353 u32 srbm_status;
1354 u32 grbm_status;
1355 u32 grbm_status2;
Alex Deuchere29ff722010-12-21 16:05:38 -05001356 struct r100_gpu_lockup *lockup;
Jerome Glisse225758d2010-03-09 14:45:10 +00001357 int r;
1358
Alex Deuchere29ff722010-12-21 16:05:38 -05001359 if (rdev->family >= CHIP_RV770)
1360 lockup = &rdev->config.rv770.lockup;
1361 else
1362 lockup = &rdev->config.r600.lockup;
1363
Jerome Glisse225758d2010-03-09 14:45:10 +00001364 srbm_status = RREG32(R_000E50_SRBM_STATUS);
1365 grbm_status = RREG32(R_008010_GRBM_STATUS);
1366 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1367 if (!G_008010_GUI_ACTIVE(grbm_status)) {
Alex Deuchere29ff722010-12-21 16:05:38 -05001368 r100_gpu_lockup_update(lockup, &rdev->cp);
Jerome Glisse225758d2010-03-09 14:45:10 +00001369 return false;
1370 }
1371 /* force CP activities */
1372 r = radeon_ring_lock(rdev, 2);
1373 if (!r) {
1374 /* PACKET2 NOP */
1375 radeon_ring_write(rdev, 0x80000000);
1376 radeon_ring_write(rdev, 0x80000000);
1377 radeon_ring_unlock_commit(rdev);
1378 }
1379 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
Alex Deuchere29ff722010-12-21 16:05:38 -05001380 return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
Jerome Glisse225758d2010-03-09 14:45:10 +00001381}
1382
Jerome Glissea2d07b72010-03-09 14:45:11 +00001383int r600_asic_reset(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001384{
1385 return r600_gpu_soft_reset(rdev);
1386}
1387
1388static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1389 u32 num_backends,
1390 u32 backend_disable_mask)
1391{
1392 u32 backend_map = 0;
1393 u32 enabled_backends_mask;
1394 u32 enabled_backends_count;
1395 u32 cur_pipe;
1396 u32 swizzle_pipe[R6XX_MAX_PIPES];
1397 u32 cur_backend;
1398 u32 i;
1399
1400 if (num_tile_pipes > R6XX_MAX_PIPES)
1401 num_tile_pipes = R6XX_MAX_PIPES;
1402 if (num_tile_pipes < 1)
1403 num_tile_pipes = 1;
1404 if (num_backends > R6XX_MAX_BACKENDS)
1405 num_backends = R6XX_MAX_BACKENDS;
1406 if (num_backends < 1)
1407 num_backends = 1;
1408
1409 enabled_backends_mask = 0;
1410 enabled_backends_count = 0;
1411 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
1412 if (((backend_disable_mask >> i) & 1) == 0) {
1413 enabled_backends_mask |= (1 << i);
1414 ++enabled_backends_count;
1415 }
1416 if (enabled_backends_count == num_backends)
1417 break;
1418 }
1419
1420 if (enabled_backends_count == 0) {
1421 enabled_backends_mask = 1;
1422 enabled_backends_count = 1;
1423 }
1424
1425 if (enabled_backends_count != num_backends)
1426 num_backends = enabled_backends_count;
1427
1428 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1429 switch (num_tile_pipes) {
1430 case 1:
1431 swizzle_pipe[0] = 0;
1432 break;
1433 case 2:
1434 swizzle_pipe[0] = 0;
1435 swizzle_pipe[1] = 1;
1436 break;
1437 case 3:
1438 swizzle_pipe[0] = 0;
1439 swizzle_pipe[1] = 1;
1440 swizzle_pipe[2] = 2;
1441 break;
1442 case 4:
1443 swizzle_pipe[0] = 0;
1444 swizzle_pipe[1] = 1;
1445 swizzle_pipe[2] = 2;
1446 swizzle_pipe[3] = 3;
1447 break;
1448 case 5:
1449 swizzle_pipe[0] = 0;
1450 swizzle_pipe[1] = 1;
1451 swizzle_pipe[2] = 2;
1452 swizzle_pipe[3] = 3;
1453 swizzle_pipe[4] = 4;
1454 break;
1455 case 6:
1456 swizzle_pipe[0] = 0;
1457 swizzle_pipe[1] = 2;
1458 swizzle_pipe[2] = 4;
1459 swizzle_pipe[3] = 5;
1460 swizzle_pipe[4] = 1;
1461 swizzle_pipe[5] = 3;
1462 break;
1463 case 7:
1464 swizzle_pipe[0] = 0;
1465 swizzle_pipe[1] = 2;
1466 swizzle_pipe[2] = 4;
1467 swizzle_pipe[3] = 6;
1468 swizzle_pipe[4] = 1;
1469 swizzle_pipe[5] = 3;
1470 swizzle_pipe[6] = 5;
1471 break;
1472 case 8:
1473 swizzle_pipe[0] = 0;
1474 swizzle_pipe[1] = 2;
1475 swizzle_pipe[2] = 4;
1476 swizzle_pipe[3] = 6;
1477 swizzle_pipe[4] = 1;
1478 swizzle_pipe[5] = 3;
1479 swizzle_pipe[6] = 5;
1480 swizzle_pipe[7] = 7;
1481 break;
1482 }
1483
1484 cur_backend = 0;
1485 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1486 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1487 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1488
1489 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
1490
1491 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1492 }
1493
1494 return backend_map;
1495}
1496
1497int r600_count_pipe_bits(uint32_t val)
1498{
1499 int i, ret = 0;
1500
1501 for (i = 0; i < 32; i++) {
1502 ret += val & 1;
1503 val >>= 1;
1504 }
1505 return ret;
1506}
1507
1508void r600_gpu_init(struct radeon_device *rdev)
1509{
1510 u32 tiling_config;
1511 u32 ramcfg;
Alex Deucherd03f5d52010-02-19 16:22:31 -05001512 u32 backend_map;
1513 u32 cc_rb_backend_disable;
1514 u32 cc_gc_shader_pipe_config;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001515 u32 tmp;
1516 int i, j;
1517 u32 sq_config;
1518 u32 sq_gpr_resource_mgmt_1 = 0;
1519 u32 sq_gpr_resource_mgmt_2 = 0;
1520 u32 sq_thread_resource_mgmt = 0;
1521 u32 sq_stack_resource_mgmt_1 = 0;
1522 u32 sq_stack_resource_mgmt_2 = 0;
1523
1524 /* FIXME: implement */
1525 switch (rdev->family) {
1526 case CHIP_R600:
1527 rdev->config.r600.max_pipes = 4;
1528 rdev->config.r600.max_tile_pipes = 8;
1529 rdev->config.r600.max_simds = 4;
1530 rdev->config.r600.max_backends = 4;
1531 rdev->config.r600.max_gprs = 256;
1532 rdev->config.r600.max_threads = 192;
1533 rdev->config.r600.max_stack_entries = 256;
1534 rdev->config.r600.max_hw_contexts = 8;
1535 rdev->config.r600.max_gs_threads = 16;
1536 rdev->config.r600.sx_max_export_size = 128;
1537 rdev->config.r600.sx_max_export_pos_size = 16;
1538 rdev->config.r600.sx_max_export_smx_size = 128;
1539 rdev->config.r600.sq_num_cf_insts = 2;
1540 break;
1541 case CHIP_RV630:
1542 case CHIP_RV635:
1543 rdev->config.r600.max_pipes = 2;
1544 rdev->config.r600.max_tile_pipes = 2;
1545 rdev->config.r600.max_simds = 3;
1546 rdev->config.r600.max_backends = 1;
1547 rdev->config.r600.max_gprs = 128;
1548 rdev->config.r600.max_threads = 192;
1549 rdev->config.r600.max_stack_entries = 128;
1550 rdev->config.r600.max_hw_contexts = 8;
1551 rdev->config.r600.max_gs_threads = 4;
1552 rdev->config.r600.sx_max_export_size = 128;
1553 rdev->config.r600.sx_max_export_pos_size = 16;
1554 rdev->config.r600.sx_max_export_smx_size = 128;
1555 rdev->config.r600.sq_num_cf_insts = 2;
1556 break;
1557 case CHIP_RV610:
1558 case CHIP_RV620:
1559 case CHIP_RS780:
1560 case CHIP_RS880:
1561 rdev->config.r600.max_pipes = 1;
1562 rdev->config.r600.max_tile_pipes = 1;
1563 rdev->config.r600.max_simds = 2;
1564 rdev->config.r600.max_backends = 1;
1565 rdev->config.r600.max_gprs = 128;
1566 rdev->config.r600.max_threads = 192;
1567 rdev->config.r600.max_stack_entries = 128;
1568 rdev->config.r600.max_hw_contexts = 4;
1569 rdev->config.r600.max_gs_threads = 4;
1570 rdev->config.r600.sx_max_export_size = 128;
1571 rdev->config.r600.sx_max_export_pos_size = 16;
1572 rdev->config.r600.sx_max_export_smx_size = 128;
1573 rdev->config.r600.sq_num_cf_insts = 1;
1574 break;
1575 case CHIP_RV670:
1576 rdev->config.r600.max_pipes = 4;
1577 rdev->config.r600.max_tile_pipes = 4;
1578 rdev->config.r600.max_simds = 4;
1579 rdev->config.r600.max_backends = 4;
1580 rdev->config.r600.max_gprs = 192;
1581 rdev->config.r600.max_threads = 192;
1582 rdev->config.r600.max_stack_entries = 256;
1583 rdev->config.r600.max_hw_contexts = 8;
1584 rdev->config.r600.max_gs_threads = 16;
1585 rdev->config.r600.sx_max_export_size = 128;
1586 rdev->config.r600.sx_max_export_pos_size = 16;
1587 rdev->config.r600.sx_max_export_smx_size = 128;
1588 rdev->config.r600.sq_num_cf_insts = 2;
1589 break;
1590 default:
1591 break;
1592 }
1593
1594 /* Initialize HDP */
1595 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1596 WREG32((0x2c14 + j), 0x00000000);
1597 WREG32((0x2c18 + j), 0x00000000);
1598 WREG32((0x2c1c + j), 0x00000000);
1599 WREG32((0x2c20 + j), 0x00000000);
1600 WREG32((0x2c24 + j), 0x00000000);
1601 }
1602
1603 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1604
1605 /* Setup tiling */
1606 tiling_config = 0;
1607 ramcfg = RREG32(RAMCFG);
1608 switch (rdev->config.r600.max_tile_pipes) {
1609 case 1:
1610 tiling_config |= PIPE_TILING(0);
1611 break;
1612 case 2:
1613 tiling_config |= PIPE_TILING(1);
1614 break;
1615 case 4:
1616 tiling_config |= PIPE_TILING(2);
1617 break;
1618 case 8:
1619 tiling_config |= PIPE_TILING(3);
1620 break;
1621 default:
1622 break;
1623 }
Alex Deucherd03f5d52010-02-19 16:22:31 -05001624 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
Jerome Glisse961fb592010-02-10 22:30:05 +00001625 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001626 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
Alex Deucher881fe6c2010-10-18 23:54:56 -04001627 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1628 if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
1629 rdev->config.r600.tiling_group_size = 512;
1630 else
1631 rdev->config.r600.tiling_group_size = 256;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001632 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1633 if (tmp > 3) {
1634 tiling_config |= ROW_TILING(3);
1635 tiling_config |= SAMPLE_SPLIT(3);
1636 } else {
1637 tiling_config |= ROW_TILING(tmp);
1638 tiling_config |= SAMPLE_SPLIT(tmp);
1639 }
1640 tiling_config |= BANK_SWAPS(1);
Alex Deucherd03f5d52010-02-19 16:22:31 -05001641
1642 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1643 cc_rb_backend_disable |=
1644 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1645
1646 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1647 cc_gc_shader_pipe_config |=
1648 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1649 cc_gc_shader_pipe_config |=
1650 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1651
1652 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1653 (R6XX_MAX_BACKENDS -
1654 r600_count_pipe_bits((cc_rb_backend_disable &
1655 R6XX_MAX_BACKENDS_MASK) >> 16)),
1656 (cc_rb_backend_disable >> 16));
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001657 rdev->config.r600.tile_config = tiling_config;
Alex Deucherd03f5d52010-02-19 16:22:31 -05001658 tiling_config |= BACKEND_MAP(backend_map);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001659 WREG32(GB_TILING_CONFIG, tiling_config);
1660 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1661 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1662
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001663 /* Setup pipes */
Alex Deucherd03f5d52010-02-19 16:22:31 -05001664 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1665 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
Alex Deucherf867c60d2010-03-05 14:50:37 -05001666 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001667
Alex Deucherd03f5d52010-02-19 16:22:31 -05001668 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001669 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1670 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1671
1672 /* Setup some CP states */
1673 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1674 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1675
1676 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1677 SYNC_WALKER | SYNC_ALIGNER));
1678 /* Setup various GPU states */
1679 if (rdev->family == CHIP_RV670)
1680 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1681
1682 tmp = RREG32(SX_DEBUG_1);
1683 tmp |= SMX_EVENT_RELEASE;
1684 if ((rdev->family > CHIP_R600))
1685 tmp |= ENABLE_NEW_SMX_ADDRESS;
1686 WREG32(SX_DEBUG_1, tmp);
1687
1688 if (((rdev->family) == CHIP_R600) ||
1689 ((rdev->family) == CHIP_RV630) ||
1690 ((rdev->family) == CHIP_RV610) ||
1691 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001692 ((rdev->family) == CHIP_RS780) ||
1693 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001694 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1695 } else {
1696 WREG32(DB_DEBUG, 0);
1697 }
1698 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1699 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1700
1701 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1702 WREG32(VGT_NUM_INSTANCES, 0);
1703
1704 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1705 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1706
1707 tmp = RREG32(SQ_MS_FIFO_SIZES);
1708 if (((rdev->family) == CHIP_RV610) ||
1709 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001710 ((rdev->family) == CHIP_RS780) ||
1711 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001712 tmp = (CACHE_FIFO_SIZE(0xa) |
1713 FETCH_FIFO_HIWATER(0xa) |
1714 DONE_FIFO_HIWATER(0xe0) |
1715 ALU_UPDATE_FIFO_HIWATER(0x8));
1716 } else if (((rdev->family) == CHIP_R600) ||
1717 ((rdev->family) == CHIP_RV630)) {
1718 tmp &= ~DONE_FIFO_HIWATER(0xff);
1719 tmp |= DONE_FIFO_HIWATER(0x4);
1720 }
1721 WREG32(SQ_MS_FIFO_SIZES, tmp);
1722
1723 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1724 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1725 */
1726 sq_config = RREG32(SQ_CONFIG);
1727 sq_config &= ~(PS_PRIO(3) |
1728 VS_PRIO(3) |
1729 GS_PRIO(3) |
1730 ES_PRIO(3));
1731 sq_config |= (DX9_CONSTS |
1732 VC_ENABLE |
1733 PS_PRIO(0) |
1734 VS_PRIO(1) |
1735 GS_PRIO(2) |
1736 ES_PRIO(3));
1737
1738 if ((rdev->family) == CHIP_R600) {
1739 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1740 NUM_VS_GPRS(124) |
1741 NUM_CLAUSE_TEMP_GPRS(4));
1742 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1743 NUM_ES_GPRS(0));
1744 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1745 NUM_VS_THREADS(48) |
1746 NUM_GS_THREADS(4) |
1747 NUM_ES_THREADS(4));
1748 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1749 NUM_VS_STACK_ENTRIES(128));
1750 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1751 NUM_ES_STACK_ENTRIES(0));
1752 } else if (((rdev->family) == CHIP_RV610) ||
1753 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001754 ((rdev->family) == CHIP_RS780) ||
1755 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001756 /* no vertex cache */
1757 sq_config &= ~VC_ENABLE;
1758
1759 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1760 NUM_VS_GPRS(44) |
1761 NUM_CLAUSE_TEMP_GPRS(2));
1762 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1763 NUM_ES_GPRS(17));
1764 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1765 NUM_VS_THREADS(78) |
1766 NUM_GS_THREADS(4) |
1767 NUM_ES_THREADS(31));
1768 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1769 NUM_VS_STACK_ENTRIES(40));
1770 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1771 NUM_ES_STACK_ENTRIES(16));
1772 } else if (((rdev->family) == CHIP_RV630) ||
1773 ((rdev->family) == CHIP_RV635)) {
1774 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1775 NUM_VS_GPRS(44) |
1776 NUM_CLAUSE_TEMP_GPRS(2));
1777 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1778 NUM_ES_GPRS(18));
1779 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1780 NUM_VS_THREADS(78) |
1781 NUM_GS_THREADS(4) |
1782 NUM_ES_THREADS(31));
1783 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1784 NUM_VS_STACK_ENTRIES(40));
1785 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1786 NUM_ES_STACK_ENTRIES(16));
1787 } else if ((rdev->family) == CHIP_RV670) {
1788 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1789 NUM_VS_GPRS(44) |
1790 NUM_CLAUSE_TEMP_GPRS(2));
1791 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1792 NUM_ES_GPRS(17));
1793 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1794 NUM_VS_THREADS(78) |
1795 NUM_GS_THREADS(4) |
1796 NUM_ES_THREADS(31));
1797 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1798 NUM_VS_STACK_ENTRIES(64));
1799 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1800 NUM_ES_STACK_ENTRIES(64));
1801 }
1802
1803 WREG32(SQ_CONFIG, sq_config);
1804 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1805 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1806 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1807 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1808 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1809
1810 if (((rdev->family) == CHIP_RV610) ||
1811 ((rdev->family) == CHIP_RV620) ||
Alex Deucheree59f2b2009-11-05 13:11:46 -05001812 ((rdev->family) == CHIP_RS780) ||
1813 ((rdev->family) == CHIP_RS880)) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001814 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1815 } else {
1816 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1817 }
1818
1819 /* More default values. 2D/3D driver should adjust as needed */
1820 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1821 S1_X(0x4) | S1_Y(0xc)));
1822 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1823 S1_X(0x2) | S1_Y(0x2) |
1824 S2_X(0xa) | S2_Y(0x6) |
1825 S3_X(0x6) | S3_Y(0xa)));
1826 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1827 S1_X(0x4) | S1_Y(0xc) |
1828 S2_X(0x1) | S2_Y(0x6) |
1829 S3_X(0xa) | S3_Y(0xe)));
1830 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1831 S5_X(0x0) | S5_Y(0x0) |
1832 S6_X(0xb) | S6_Y(0x4) |
1833 S7_X(0x7) | S7_Y(0x8)));
1834
1835 WREG32(VGT_STRMOUT_EN, 0);
1836 tmp = rdev->config.r600.max_pipes * 16;
1837 switch (rdev->family) {
1838 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001839 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05001840 case CHIP_RS780:
1841 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001842 tmp += 32;
1843 break;
1844 case CHIP_RV670:
1845 tmp += 128;
1846 break;
1847 default:
1848 break;
1849 }
1850 if (tmp > 256) {
1851 tmp = 256;
1852 }
1853 WREG32(VGT_ES_PER_GS, 128);
1854 WREG32(VGT_GS_PER_ES, tmp);
1855 WREG32(VGT_GS_PER_VS, 2);
1856 WREG32(VGT_GS_VERTEX_REUSE, 16);
1857
1858 /* more default values. 2D/3D driver should adjust as needed */
1859 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1860 WREG32(VGT_STRMOUT_EN, 0);
1861 WREG32(SX_MISC, 0);
1862 WREG32(PA_SC_MODE_CNTL, 0);
1863 WREG32(PA_SC_AA_CONFIG, 0);
1864 WREG32(PA_SC_LINE_STIPPLE, 0);
1865 WREG32(SPI_INPUT_Z, 0);
1866 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1867 WREG32(CB_COLOR7_FRAG, 0);
1868
1869 /* Clear render buffer base addresses */
1870 WREG32(CB_COLOR0_BASE, 0);
1871 WREG32(CB_COLOR1_BASE, 0);
1872 WREG32(CB_COLOR2_BASE, 0);
1873 WREG32(CB_COLOR3_BASE, 0);
1874 WREG32(CB_COLOR4_BASE, 0);
1875 WREG32(CB_COLOR5_BASE, 0);
1876 WREG32(CB_COLOR6_BASE, 0);
1877 WREG32(CB_COLOR7_BASE, 0);
1878 WREG32(CB_COLOR7_FRAG, 0);
1879
1880 switch (rdev->family) {
1881 case CHIP_RV610:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001882 case CHIP_RV620:
Alex Deucheree59f2b2009-11-05 13:11:46 -05001883 case CHIP_RS780:
1884 case CHIP_RS880:
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001885 tmp = TC_L2_SIZE(8);
1886 break;
1887 case CHIP_RV630:
1888 case CHIP_RV635:
1889 tmp = TC_L2_SIZE(4);
1890 break;
1891 case CHIP_R600:
1892 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1893 break;
1894 default:
1895 tmp = TC_L2_SIZE(0);
1896 break;
1897 }
1898 WREG32(TC_CNTL, tmp);
1899
1900 tmp = RREG32(HDP_HOST_PATH_CNTL);
1901 WREG32(HDP_HOST_PATH_CNTL, tmp);
1902
1903 tmp = RREG32(ARB_POP);
1904 tmp |= ENABLE_TC128;
1905 WREG32(ARB_POP, tmp);
1906
1907 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1908 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1909 NUM_CLIP_SEQ(3)));
1910 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1911}
1912
1913
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001914/*
 1915 * Indirect register accessors
1916 */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001917u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001918{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001919 u32 r;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001920
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001921 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1922 (void)RREG32(PCIE_PORT_INDEX);
1923 r = RREG32(PCIE_PORT_DATA);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001924 return r;
1925}
1926
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001927void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001928{
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001929 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1930 (void)RREG32(PCIE_PORT_INDEX);
1931 WREG32(PCIE_PORT_DATA, (v));
1932 (void)RREG32(PCIE_PORT_DATA);
1933}
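/* Both accessors use the usual index/data pair: the register index is written
 * to PCIE_PORT_INDEX, the dummy read flushes that posted write, and the value
 * is then transferred through PCIE_PORT_DATA. A hypothetical read-modify-write
 * caller (register and bit names invented for illustration) would look like:
 *
 *	u32 v = r600_pciep_rreg(rdev, PCIE_SOME_REG);
 *	r600_pciep_wreg(rdev, PCIE_SOME_REG, v | SOME_BIT);
 */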
1934
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001935/*
1936 * CP & Ring
1937 */
1938void r600_cp_stop(struct radeon_device *rdev)
1939{
Jerome Glissec919b372010-08-10 17:41:31 -04001940 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001941 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
Alex Deucher724c80e2010-08-27 18:25:25 -04001942 WREG32(SCRATCH_UMSK, 0);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001943}
1944
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001945int r600_init_microcode(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001946{
1947 struct platform_device *pdev;
1948 const char *chip_name;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001949 const char *rlc_chip_name;
1950 size_t pfp_req_size, me_req_size, rlc_req_size;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001951 char fw_name[30];
1952 int err;
1953
1954 DRM_DEBUG("\n");
1955
1956 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1957 err = IS_ERR(pdev);
1958 if (err) {
1959 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1960 return -EINVAL;
1961 }
1962
1963 switch (rdev->family) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001964 case CHIP_R600:
1965 chip_name = "R600";
1966 rlc_chip_name = "R600";
1967 break;
1968 case CHIP_RV610:
1969 chip_name = "RV610";
1970 rlc_chip_name = "R600";
1971 break;
1972 case CHIP_RV630:
1973 chip_name = "RV630";
1974 rlc_chip_name = "R600";
1975 break;
1976 case CHIP_RV620:
1977 chip_name = "RV620";
1978 rlc_chip_name = "R600";
1979 break;
1980 case CHIP_RV635:
1981 chip_name = "RV635";
1982 rlc_chip_name = "R600";
1983 break;
1984 case CHIP_RV670:
1985 chip_name = "RV670";
1986 rlc_chip_name = "R600";
1987 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001988 case CHIP_RS780:
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001989 case CHIP_RS880:
1990 chip_name = "RS780";
1991 rlc_chip_name = "R600";
1992 break;
1993 case CHIP_RV770:
1994 chip_name = "RV770";
1995 rlc_chip_name = "R700";
1996 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001997 case CHIP_RV730:
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001998 case CHIP_RV740:
1999 chip_name = "RV730";
2000 rlc_chip_name = "R700";
2001 break;
2002 case CHIP_RV710:
2003 chip_name = "RV710";
2004 rlc_chip_name = "R700";
2005 break;
Alex Deucherfe251e22010-03-24 13:36:43 -04002006 case CHIP_CEDAR:
2007 chip_name = "CEDAR";
Alex Deucher45f9a392010-03-24 13:55:51 -04002008 rlc_chip_name = "CEDAR";
Alex Deucherfe251e22010-03-24 13:36:43 -04002009 break;
2010 case CHIP_REDWOOD:
2011 chip_name = "REDWOOD";
Alex Deucher45f9a392010-03-24 13:55:51 -04002012 rlc_chip_name = "REDWOOD";
Alex Deucherfe251e22010-03-24 13:36:43 -04002013 break;
2014 case CHIP_JUNIPER:
2015 chip_name = "JUNIPER";
Alex Deucher45f9a392010-03-24 13:55:51 -04002016 rlc_chip_name = "JUNIPER";
Alex Deucherfe251e22010-03-24 13:36:43 -04002017 break;
2018 case CHIP_CYPRESS:
2019 case CHIP_HEMLOCK:
2020 chip_name = "CYPRESS";
Alex Deucher45f9a392010-03-24 13:55:51 -04002021 rlc_chip_name = "CYPRESS";
Alex Deucherfe251e22010-03-24 13:36:43 -04002022 break;
Alex Deucher439bd6c2010-11-22 17:56:31 -05002023 case CHIP_PALM:
2024 chip_name = "PALM";
2025 rlc_chip_name = "SUMO";
2026 break;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002027 default: BUG();
2028 }
2029
Alex Deucherfe251e22010-03-24 13:36:43 -04002030 if (rdev->family >= CHIP_CEDAR) {
2031 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2032 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
Alex Deucher45f9a392010-03-24 13:55:51 -04002033 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
Alex Deucherfe251e22010-03-24 13:36:43 -04002034 } else if (rdev->family >= CHIP_RV770) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002035 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2036 me_req_size = R700_PM4_UCODE_SIZE * 4;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002037 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002038 } else {
2039 pfp_req_size = PFP_UCODE_SIZE * 4;
2040 me_req_size = PM4_UCODE_SIZE * 12;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002041 rlc_req_size = RLC_UCODE_SIZE * 4;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002042 }
2043
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002044 DRM_INFO("Loading %s Microcode\n", chip_name);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002045
2046 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2047 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2048 if (err)
2049 goto out;
2050 if (rdev->pfp_fw->size != pfp_req_size) {
2051 printk(KERN_ERR
2052 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2053 rdev->pfp_fw->size, fw_name);
2054 err = -EINVAL;
2055 goto out;
2056 }
2057
2058 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2059 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2060 if (err)
2061 goto out;
2062 if (rdev->me_fw->size != me_req_size) {
2063 printk(KERN_ERR
2064 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2065 rdev->me_fw->size, fw_name);
2066 err = -EINVAL;
2067 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002068
2069 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2070 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2071 if (err)
2072 goto out;
2073 if (rdev->rlc_fw->size != rlc_req_size) {
2074 printk(KERN_ERR
2075 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2076 rdev->rlc_fw->size, fw_name);
2077 err = -EINVAL;
2078 }
2079
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002080out:
2081 platform_device_unregister(pdev);
2082
2083 if (err) {
2084 if (err != -EINVAL)
2085 printk(KERN_ERR
2086 "r600_cp: Failed to load firmware \"%s\"\n",
2087 fw_name);
2088 release_firmware(rdev->pfp_fw);
2089 rdev->pfp_fw = NULL;
2090 release_firmware(rdev->me_fw);
2091 rdev->me_fw = NULL;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002092 release_firmware(rdev->rlc_fw);
2093 rdev->rlc_fw = NULL;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002094 }
2095 return err;
2096}
2097
2098static int r600_cp_load_microcode(struct radeon_device *rdev)
2099{
2100 const __be32 *fw_data;
2101 int i;
2102
2103 if (!rdev->me_fw || !rdev->pfp_fw)
2104 return -EINVAL;
2105
2106 r600_cp_stop(rdev);
2107
Cédric Cano4eace7f2011-02-11 19:45:38 -05002108 WREG32(CP_RB_CNTL,
2109#ifdef __BIG_ENDIAN
2110 BUF_SWAP_32BIT |
2111#endif
2112 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002113
2114 /* Reset cp */
2115 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2116 RREG32(GRBM_SOFT_RESET);
2117 mdelay(15);
2118 WREG32(GRBM_SOFT_RESET, 0);
2119
2120 WREG32(CP_ME_RAM_WADDR, 0);
2121
2122 fw_data = (const __be32 *)rdev->me_fw->data;
2123 WREG32(CP_ME_RAM_WADDR, 0);
2124 for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2125 WREG32(CP_ME_RAM_DATA,
2126 be32_to_cpup(fw_data++));
2127
2128 fw_data = (const __be32 *)rdev->pfp_fw->data;
2129 WREG32(CP_PFP_UCODE_ADDR, 0);
2130 for (i = 0; i < PFP_UCODE_SIZE; i++)
2131 WREG32(CP_PFP_UCODE_DATA,
2132 be32_to_cpup(fw_data++));
2133
2134 WREG32(CP_PFP_UCODE_ADDR, 0);
2135 WREG32(CP_ME_RAM_WADDR, 0);
2136 WREG32(CP_ME_RAM_RADDR, 0);
2137 return 0;
2138}
2139
2140int r600_cp_start(struct radeon_device *rdev)
2141{
2142 int r;
2143 uint32_t cp_me;
2144
2145 r = radeon_ring_lock(rdev, 7);
2146 if (r) {
2147 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2148 return r;
2149 }
2150 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2151 radeon_ring_write(rdev, 0x1);
Alex Deucher7e7b41d2010-09-02 21:32:32 -04002152 if (rdev->family >= CHIP_RV770) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002153 radeon_ring_write(rdev, 0x0);
2154 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
Alex Deucherfe251e22010-03-24 13:36:43 -04002155 } else {
2156 radeon_ring_write(rdev, 0x3);
2157 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002158 }
2159 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2160 radeon_ring_write(rdev, 0);
2161 radeon_ring_write(rdev, 0);
2162 radeon_ring_unlock_commit(rdev);
2163
2164 cp_me = 0xff;
2165 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2166 return 0;
2167}
2168
2169int r600_cp_resume(struct radeon_device *rdev)
2170{
2171 u32 tmp;
2172 u32 rb_bufsz;
2173 int r;
2174
2175 /* Reset cp */
2176 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2177 RREG32(GRBM_SOFT_RESET);
2178 mdelay(15);
2179 WREG32(GRBM_SOFT_RESET, 0);
2180
2181 /* Set ring buffer size */
2182 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
Alex Deucher724c80e2010-08-27 18:25:25 -04002183 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002184#ifdef __BIG_ENDIAN
Alex Deucherd6f28932009-11-02 16:01:27 -05002185 tmp |= BUF_SWAP_32BIT;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002186#endif
Alex Deucherd6f28932009-11-02 16:01:27 -05002187 WREG32(CP_RB_CNTL, tmp);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002188 WREG32(CP_SEM_WAIT_TIMER, 0x4);
2189
2190 /* Set the write pointer delay */
2191 WREG32(CP_RB_WPTR_DELAY, 0);
2192
2193 /* Initialize the ring buffer's read and write pointers */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002194 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2195 WREG32(CP_RB_RPTR_WR, 0);
2196 WREG32(CP_RB_WPTR, 0);
Alex Deucher724c80e2010-08-27 18:25:25 -04002197
2198 /* set the wb address whether it's enabled or not */
Cédric Cano4eace7f2011-02-11 19:45:38 -05002199 WREG32(CP_RB_RPTR_ADDR,
2200#ifdef __BIG_ENDIAN
2201 RB_RPTR_SWAP(2) |
2202#endif
2203 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
Alex Deucher724c80e2010-08-27 18:25:25 -04002204 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2205 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2206
2207 if (rdev->wb.enabled)
2208 WREG32(SCRATCH_UMSK, 0xff);
2209 else {
2210 tmp |= RB_NO_UPDATE;
2211 WREG32(SCRATCH_UMSK, 0);
2212 }
2213
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002214 mdelay(1);
2215 WREG32(CP_RB_CNTL, tmp);
2216
2217 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
2218 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2219
2220 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2221 rdev->cp.wptr = RREG32(CP_RB_WPTR);
2222
2223 r600_cp_start(rdev);
2224 rdev->cp.ready = true;
2225 r = radeon_ring_test(rdev);
2226 if (r) {
2227 rdev->cp.ready = false;
2228 return r;
2229 }
2230 return 0;
2231}
2232
2233void r600_cp_commit(struct radeon_device *rdev)
2234{
2235 WREG32(CP_RB_WPTR, rdev->cp.wptr);
2236 (void)RREG32(CP_RB_WPTR);
2237}
2238
2239void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2240{
2241 u32 rb_bufsz;
2242
2243 /* Align ring size */
2244 rb_bufsz = drm_order(ring_size / 8);
2245 ring_size = (1 << (rb_bufsz + 1)) * 4;
2246 rdev->cp.ring_size = ring_size;
2247 rdev->cp.align_mask = 16 - 1;
2248}
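/* Assuming drm_order() returns ceil(log2(size)): the 1024 * 1024 byte request
 * made from r600_init() below gives rb_bufsz = drm_order(131072) = 17 and
 * ring_size = (1 << 18) * 4 = 1MB, i.e. a power-of-two request is kept as is,
 * while any other request is rounded up to the next power of two.
 */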
2249
Jerome Glisse655efd32010-02-02 11:51:45 +01002250void r600_cp_fini(struct radeon_device *rdev)
2251{
2252 r600_cp_stop(rdev);
2253 radeon_ring_fini(rdev);
2254}
2255
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002256
2257/*
 2258 * GPU scratch register helper functions.
2259 */
2260void r600_scratch_init(struct radeon_device *rdev)
2261{
2262 int i;
2263
2264 rdev->scratch.num_reg = 7;
Alex Deucher724c80e2010-08-27 18:25:25 -04002265 rdev->scratch.reg_base = SCRATCH_REG0;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002266 for (i = 0; i < rdev->scratch.num_reg; i++) {
2267 rdev->scratch.free[i] = true;
Alex Deucher724c80e2010-08-27 18:25:25 -04002268 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002269 }
2270}
2271
2272int r600_ring_test(struct radeon_device *rdev)
2273{
2274 uint32_t scratch;
2275 uint32_t tmp = 0;
2276 unsigned i;
2277 int r;
2278
2279 r = radeon_scratch_get(rdev, &scratch);
2280 if (r) {
2281 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2282 return r;
2283 }
2284 WREG32(scratch, 0xCAFEDEAD);
2285 r = radeon_ring_lock(rdev, 3);
2286 if (r) {
2287 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2288 radeon_scratch_free(rdev, scratch);
2289 return r;
2290 }
2291 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2292 radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2293 radeon_ring_write(rdev, 0xDEADBEEF);
2294 radeon_ring_unlock_commit(rdev);
2295 for (i = 0; i < rdev->usec_timeout; i++) {
2296 tmp = RREG32(scratch);
2297 if (tmp == 0xDEADBEEF)
2298 break;
2299 DRM_UDELAY(1);
2300 }
2301 if (i < rdev->usec_timeout) {
2302 DRM_INFO("ring test succeeded in %d usecs\n", i);
2303 } else {
2304 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2305 scratch, tmp);
2306 r = -EINVAL;
2307 }
2308 radeon_scratch_free(rdev, scratch);
2309 return r;
2310}
2311
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002312void r600_fence_ring_emit(struct radeon_device *rdev,
2313 struct radeon_fence *fence)
2314{
Alex Deucherd0f8a852010-09-04 05:04:34 -04002315 if (rdev->wb.use_event) {
2316 u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
2317 (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
2318 /* EVENT_WRITE_EOP - flush caches, send int */
2319 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2320 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2321 radeon_ring_write(rdev, addr & 0xffffffff);
2322 radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2323 radeon_ring_write(rdev, fence->seq);
2324 radeon_ring_write(rdev, 0);
2325 } else {
2326 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2327 radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2328 /* wait for 3D idle clean */
2329 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2330 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2331 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2332 /* Emit fence sequence & fire IRQ */
2333 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2334 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2335 radeon_ring_write(rdev, fence->seq);
2336 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2337 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
2338 radeon_ring_write(rdev, RB_INT_STAT);
2339 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002340}
2341
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002342int r600_copy_blit(struct radeon_device *rdev,
2343 uint64_t src_offset, uint64_t dst_offset,
2344 unsigned num_pages, struct radeon_fence *fence)
2345{
Jerome Glisseff82f052010-01-22 15:19:00 +01002346 int r;
2347
2348 mutex_lock(&rdev->r600_blit.mutex);
2349 rdev->r600_blit.vb_ib = NULL;
2350 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
2351 if (r) {
2352 if (rdev->r600_blit.vb_ib)
2353 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2354 mutex_unlock(&rdev->r600_blit.mutex);
2355 return r;
2356 }
Matt Turnera77f1712009-10-14 00:34:41 -04002357 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002358 r600_blit_done_copy(rdev, fence);
Jerome Glisseff82f052010-01-22 15:19:00 +01002359 mutex_unlock(&rdev->r600_blit.mutex);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002360 return 0;
2361}
2362
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002363int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2364 uint32_t tiling_flags, uint32_t pitch,
2365 uint32_t offset, uint32_t obj_size)
2366{
2367 /* FIXME: implement */
2368 return 0;
2369}
2370
2371void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2372{
2373 /* FIXME: implement */
2374}
2375
Dave Airliefc30b8e2009-09-18 15:19:37 +10002376int r600_startup(struct radeon_device *rdev)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002377{
2378 int r;
2379
Alex Deucher9e46a482011-01-06 18:49:35 -05002380 /* enable pcie gen2 link */
2381 r600_pcie_gen2_enable(rdev);
2382
Alex Deucher779720a2009-12-09 19:31:44 -05002383 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2384 r = r600_init_microcode(rdev);
2385 if (r) {
2386 DRM_ERROR("Failed to load firmware!\n");
2387 return r;
2388 }
2389 }
2390
Jerome Glissea3c19452009-10-01 18:02:13 +02002391 r600_mc_program(rdev);
Jerome Glisse1a029b72009-10-06 19:04:30 +02002392 if (rdev->flags & RADEON_IS_AGP) {
2393 r600_agp_enable(rdev);
2394 } else {
2395 r = r600_pcie_gart_enable(rdev);
2396 if (r)
2397 return r;
2398 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002399 r600_gpu_init(rdev);
Jerome Glissec38c7b62010-02-04 17:27:27 +01002400 r = r600_blit_init(rdev);
2401 if (r) {
2402 r600_blit_fini(rdev);
2403 rdev->asic->copy = NULL;
 2404		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
2405 }
Alex Deucherb70d6bb2010-08-06 21:36:58 -04002406
Alex Deucher724c80e2010-08-27 18:25:25 -04002407 /* allocate wb buffer */
2408 r = radeon_wb_init(rdev);
2409 if (r)
2410 return r;
2411
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002412 /* Enable IRQ */
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002413 r = r600_irq_init(rdev);
2414 if (r) {
2415 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2416 radeon_irq_kms_fini(rdev);
2417 return r;
2418 }
2419 r600_irq_set(rdev);
2420
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002421 r = radeon_ring_init(rdev, rdev->cp.ring_size);
2422 if (r)
2423 return r;
2424 r = r600_cp_load_microcode(rdev);
2425 if (r)
2426 return r;
2427 r = r600_cp_resume(rdev);
2428 if (r)
2429 return r;
Alex Deucher724c80e2010-08-27 18:25:25 -04002430
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002431 return 0;
2432}
2433
Dave Airlie28d52042009-09-21 14:33:58 +10002434void r600_vga_set_state(struct radeon_device *rdev, bool state)
2435{
2436 uint32_t temp;
2437
2438 temp = RREG32(CONFIG_CNTL);
2439 if (state == false) {
2440 temp &= ~(1<<0);
2441 temp |= (1<<1);
2442 } else {
2443 temp &= ~(1<<1);
2444 }
2445 WREG32(CONFIG_CNTL, temp);
2446}
2447
Dave Airliefc30b8e2009-09-18 15:19:37 +10002448int r600_resume(struct radeon_device *rdev)
2449{
2450 int r;
2451
Jerome Glisse1a029b72009-10-06 19:04:30 +02002452	/* Do not reset the GPU before posting; on r600 hw, unlike on r500 hw,
 2453	 * posting will perform the necessary tasks to bring the GPU back into
 2454	 * good shape.
2455 */
Dave Airliefc30b8e2009-09-18 15:19:37 +10002456 /* post card */
Jerome Glissee7d40b92009-10-01 18:02:15 +02002457 atom_asic_init(rdev->mode_info.atom_context);
Dave Airliefc30b8e2009-09-18 15:19:37 +10002458
2459 r = r600_startup(rdev);
2460 if (r) {
2461 DRM_ERROR("r600 startup failed on resume\n");
2462 return r;
2463 }
2464
Jerome Glisse62a8ea32009-10-01 18:02:11 +02002465 r = r600_ib_test(rdev);
Dave Airliefc30b8e2009-09-18 15:19:37 +10002466 if (r) {
 2467		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2468 return r;
2469 }
Rafał Miłecki38fd2c62010-01-28 18:16:30 +01002470
2471 r = r600_audio_init(rdev);
2472 if (r) {
2473 DRM_ERROR("radeon: audio resume failed\n");
2474 return r;
2475 }
2476
Dave Airliefc30b8e2009-09-18 15:19:37 +10002477 return r;
2478}
2479
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002480int r600_suspend(struct radeon_device *rdev)
2481{
Jerome Glisse4c788672009-11-20 14:29:23 +01002482 int r;
2483
Rafał Miłecki38fd2c62010-01-28 18:16:30 +01002484 r600_audio_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002485 /* FIXME: we should wait for ring to be empty */
2486 r600_cp_stop(rdev);
Dave Airliebc1a6312009-09-15 11:07:52 +10002487 rdev->cp.ready = false;
Jerome Glisse0c452492010-01-15 14:44:37 +01002488 r600_irq_suspend(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04002489 radeon_wb_disable(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02002490 r600_pcie_gart_disable(rdev);
Dave Airliebc1a6312009-09-15 11:07:52 +10002491 /* unpin shaders bo */
Jerome Glisse30d2d9a2010-01-13 10:29:27 +01002492 if (rdev->r600_blit.shader_obj) {
2493 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2494 if (!r) {
2495 radeon_bo_unpin(rdev->r600_blit.shader_obj);
2496 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2497 }
2498 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002499 return 0;
2500}
2501
 2502/* The plan is to move initialization into this function and use
 2503 * helper functions so that radeon_device_init does pretty much
 2504 * nothing more than calling the asic-specific functions. This
 2505 * should also allow us to remove a bunch of callback functions
 2506 * like vram_info.
2507 */
2508int r600_init(struct radeon_device *rdev)
2509{
2510 int r;
2511
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002512 r = radeon_dummy_page_init(rdev);
2513 if (r)
2514 return r;
2515 if (r600_debugfs_mc_info_init(rdev)) {
2516 DRM_ERROR("Failed to register debugfs file for mc !\n");
2517 }
 2518	/* This doesn't do much */
2519 r = radeon_gem_init(rdev);
2520 if (r)
2521 return r;
2522 /* Read BIOS */
2523 if (!radeon_get_bios(rdev)) {
2524 if (ASIC_IS_AVIVO(rdev))
2525 return -EINVAL;
2526 }
2527 /* Must be an ATOMBIOS */
Jerome Glissee7d40b92009-10-01 18:02:15 +02002528 if (!rdev->is_atom_bios) {
2529 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002530 return -EINVAL;
Jerome Glissee7d40b92009-10-01 18:02:15 +02002531 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002532 r = radeon_atombios_init(rdev);
2533 if (r)
2534 return r;
2535 /* Post card if necessary */
Alex Deucherfd909c32011-01-11 18:08:59 -05002536 if (!radeon_card_posted(rdev)) {
Dave Airlie72542d72009-12-01 14:06:31 +10002537 if (!rdev->bios) {
2538 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2539 return -EINVAL;
2540 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002541 DRM_INFO("GPU not posted. posting now...\n");
2542 atom_asic_init(rdev->mode_info.atom_context);
2543 }
2544 /* Initialize scratch registers */
2545 r600_scratch_init(rdev);
2546 /* Initialize surface registers */
2547 radeon_surface_init(rdev);
Rafał Miłecki74338742009-11-03 00:53:02 +01002548 /* Initialize clocks */
Michel Dänzer5e6dde72009-09-17 09:42:28 +02002549 radeon_get_clock_info(rdev->ddev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002550 /* Fence driver */
2551 r = radeon_fence_driver_init(rdev);
2552 if (r)
2553 return r;
Jerome Glisse700a0cc2010-01-13 15:16:38 +01002554 if (rdev->flags & RADEON_IS_AGP) {
2555 r = radeon_agp_init(rdev);
2556 if (r)
2557 radeon_agp_disable(rdev);
2558 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002559 r = r600_mc_init(rdev);
Jerome Glisseb574f252009-10-06 19:04:29 +02002560 if (r)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002561 return r;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002562 /* Memory manager */
Jerome Glisse4c788672009-11-20 14:29:23 +01002563 r = radeon_bo_init(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002564 if (r)
2565 return r;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002566
2567 r = radeon_irq_kms_init(rdev);
2568 if (r)
2569 return r;
2570
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002571 rdev->cp.ring_obj = NULL;
2572 r600_ring_init(rdev, 1024 * 1024);
2573
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002574 rdev->ih.ring_obj = NULL;
2575 r600_ih_ring_init(rdev, 64 * 1024);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002576
Jerome Glisse4aac0472009-09-14 18:29:49 +02002577 r = r600_pcie_gart_init(rdev);
2578 if (r)
2579 return r;
2580
Alex Deucher779720a2009-12-09 19:31:44 -05002581 rdev->accel_working = true;
Dave Airliefc30b8e2009-09-18 15:19:37 +10002582 r = r600_startup(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002583 if (r) {
Jerome Glisse655efd32010-02-02 11:51:45 +01002584 dev_err(rdev->dev, "disabling GPU acceleration\n");
2585 r600_cp_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002586 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04002587 radeon_wb_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002588 radeon_irq_kms_fini(rdev);
Jerome Glisse75c81292009-10-01 18:02:14 +02002589 r600_pcie_gart_fini(rdev);
Jerome Glisse733289c2009-09-16 15:24:21 +02002590 rdev->accel_working = false;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002591 }
Jerome Glisse733289c2009-09-16 15:24:21 +02002592 if (rdev->accel_working) {
2593 r = radeon_ib_pool_init(rdev);
2594 if (r) {
Jerome Glissedb963802010-01-17 21:21:56 +01002595 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
Jerome Glisse733289c2009-09-16 15:24:21 +02002596 rdev->accel_working = false;
Jerome Glissedb963802010-01-17 21:21:56 +01002597 } else {
2598 r = r600_ib_test(rdev);
2599 if (r) {
2600 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2601 rdev->accel_working = false;
2602 }
Jerome Glisse733289c2009-09-16 15:24:21 +02002603 }
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002604 }
Christian Koenigdafc3bd2009-10-11 23:49:13 +02002605
2606 r = r600_audio_init(rdev);
2607 if (r)
2608 return r; /* TODO error handling */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002609 return 0;
2610}
2611
2612void r600_fini(struct radeon_device *rdev)
2613{
Christian Koenigdafc3bd2009-10-11 23:49:13 +02002614 r600_audio_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002615 r600_blit_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002616 r600_cp_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002617 r600_irq_fini(rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04002618 radeon_wb_fini(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002619 radeon_irq_kms_fini(rdev);
Jerome Glisse4aac0472009-09-14 18:29:49 +02002620 r600_pcie_gart_fini(rdev);
Jerome Glisse655efd32010-02-02 11:51:45 +01002621 radeon_agp_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002622 radeon_gem_fini(rdev);
2623 radeon_fence_driver_fini(rdev);
Jerome Glisse4c788672009-11-20 14:29:23 +01002624 radeon_bo_fini(rdev);
Jerome Glissee7d40b92009-10-01 18:02:15 +02002625 radeon_atombios_fini(rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002626 kfree(rdev->bios);
2627 rdev->bios = NULL;
2628 radeon_dummy_page_fini(rdev);
2629}
2630
2631
2632/*
2633 * CS stuff
2634 */
2635void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2636{
2637 /* FIXME: implement */
2638 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
Cédric Cano4eace7f2011-02-11 19:45:38 -05002639 radeon_ring_write(rdev,
2640#ifdef __BIG_ENDIAN
2641 (2 << 0) |
2642#endif
2643 (ib->gpu_addr & 0xFFFFFFFC));
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002644 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2645 radeon_ring_write(rdev, ib->length_dw);
2646}
2647
2648int r600_ib_test(struct radeon_device *rdev)
2649{
2650 struct radeon_ib *ib;
2651 uint32_t scratch;
2652 uint32_t tmp = 0;
2653 unsigned i;
2654 int r;
2655
2656 r = radeon_scratch_get(rdev, &scratch);
2657 if (r) {
2658 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2659 return r;
2660 }
2661 WREG32(scratch, 0xCAFEDEAD);
2662 r = radeon_ib_get(rdev, &ib);
2663 if (r) {
2664 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2665 return r;
2666 }
2667 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2668 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2669 ib->ptr[2] = 0xDEADBEEF;
2670 ib->ptr[3] = PACKET2(0);
2671 ib->ptr[4] = PACKET2(0);
2672 ib->ptr[5] = PACKET2(0);
2673 ib->ptr[6] = PACKET2(0);
2674 ib->ptr[7] = PACKET2(0);
2675 ib->ptr[8] = PACKET2(0);
2676 ib->ptr[9] = PACKET2(0);
2677 ib->ptr[10] = PACKET2(0);
2678 ib->ptr[11] = PACKET2(0);
2679 ib->ptr[12] = PACKET2(0);
2680 ib->ptr[13] = PACKET2(0);
2681 ib->ptr[14] = PACKET2(0);
2682 ib->ptr[15] = PACKET2(0);
2683 ib->length_dw = 16;
2684 r = radeon_ib_schedule(rdev, ib);
2685 if (r) {
2686 radeon_scratch_free(rdev, scratch);
2687 radeon_ib_free(rdev, &ib);
2688 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2689 return r;
2690 }
2691 r = radeon_fence_wait(ib->fence, false);
2692 if (r) {
2693 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2694 return r;
2695 }
2696 for (i = 0; i < rdev->usec_timeout; i++) {
2697 tmp = RREG32(scratch);
2698 if (tmp == 0xDEADBEEF)
2699 break;
2700 DRM_UDELAY(1);
2701 }
2702 if (i < rdev->usec_timeout) {
2703 DRM_INFO("ib test succeeded in %u usecs\n", i);
2704 } else {
Daniel J Blueman4417d7f2010-09-22 17:57:19 +01002705 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002706 scratch, tmp);
2707 r = -EINVAL;
2708 }
2709 radeon_scratch_free(rdev, scratch);
2710 radeon_ib_free(rdev, &ib);
2711 return r;
2712}
2713
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002714/*
2715 * Interrupts
2716 *
2717 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much the
2718 * same as the CP ring buffer, but in reverse: rather than the CPU writing
2719 * to the ring and the GPU consuming it, the GPU writes to the ring and the
2720 * host consumes it. As the host irq handler processes interrupts, it
2721 * increments the rptr. When the rptr catches up with the wptr, all the
2722 * current interrupts have been processed.
2723 */
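/*
 * Minimal host-side consumption sketch (illustration only; it mirrors
 * r600_irq_process() below, and process_vector() is a hypothetical
 * helper, not a driver function). Each vector is 16 bytes, so the rptr
 * advances by 16 and wraps via ptr_mask:
 *
 *	u32 wptr = r600_get_ih_wptr(rdev);
 *	u32 rptr = rdev->ih.rptr;
 *
 *	while (rptr != wptr) {
 *		process_vector(&rdev->ih.ring[rptr / 4]);
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *	WREG32(IH_RB_RPTR, rptr);
 */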
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002724
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002725void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2726{
2727 u32 rb_bufsz;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10002728
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002729 /* Align ring size */
2730 rb_bufsz = drm_order(ring_size / 4);
2731 ring_size = (1 << rb_bufsz) * 4;
2732 rdev->ih.ring_size = ring_size;
Jerome Glisse0c452492010-01-15 14:44:37 +01002733 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
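 /* Worked example (assuming drm_order() returns the smallest order n
  * with (1 << n) >= its argument): a requested ring_size of 96 KB is
  * 24576 dwords, drm_order() gives 15, so the ring rounds up to
  * (1 << 15) * 4 = 128 KB and ptr_mask becomes 0x1FFFF.
  */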
2734 rdev->ih.rptr = 0;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002735}
2736
Jerome Glisse0c452492010-01-15 14:44:37 +01002737static int r600_ih_ring_alloc(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002738{
2739 int r;
2740
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002741 /* Allocate ring buffer */
2742 if (rdev->ih.ring_obj == NULL) {
Jerome Glisse4c788672009-11-20 14:29:23 +01002743 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
Alex Deucher268b2512010-11-17 19:00:26 -05002744 PAGE_SIZE, true,
Jerome Glisse4c788672009-11-20 14:29:23 +01002745 RADEON_GEM_DOMAIN_GTT,
2746 &rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002747 if (r) {
2748 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2749 return r;
2750 }
Jerome Glisse4c788672009-11-20 14:29:23 +01002751 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2752 if (unlikely(r != 0))
2753 return r;
2754 r = radeon_bo_pin(rdev->ih.ring_obj,
2755 RADEON_GEM_DOMAIN_GTT,
2756 &rdev->ih.gpu_addr);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002757 if (r) {
Jerome Glisse4c788672009-11-20 14:29:23 +01002758 radeon_bo_unreserve(rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002759 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2760 return r;
2761 }
Jerome Glisse4c788672009-11-20 14:29:23 +01002762 r = radeon_bo_kmap(rdev->ih.ring_obj,
2763 (void **)&rdev->ih.ring);
2764 radeon_bo_unreserve(rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002765 if (r) {
2766 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2767 return r;
2768 }
2769 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002770 return 0;
2771}
2772
2773static void r600_ih_ring_fini(struct radeon_device *rdev)
2774{
Jerome Glisse4c788672009-11-20 14:29:23 +01002775 int r;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002776 if (rdev->ih.ring_obj) {
Jerome Glisse4c788672009-11-20 14:29:23 +01002777 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2778 if (likely(r == 0)) {
2779 radeon_bo_kunmap(rdev->ih.ring_obj);
2780 radeon_bo_unpin(rdev->ih.ring_obj);
2781 radeon_bo_unreserve(rdev->ih.ring_obj);
2782 }
2783 radeon_bo_unref(&rdev->ih.ring_obj);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002784 rdev->ih.ring = NULL;
2785 rdev->ih.ring_obj = NULL;
2786 }
2787}
2788
Alex Deucher45f9a392010-03-24 13:55:51 -04002789void r600_rlc_stop(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002790{
2791
Alex Deucher45f9a392010-03-24 13:55:51 -04002792 if ((rdev->family >= CHIP_RV770) &&
2793 (rdev->family <= CHIP_RV740)) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002794 /* r7xx asics need to soft reset RLC before halting */
2795 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2796 RREG32(SRBM_SOFT_RESET);
2797 udelay(15000);
2798 WREG32(SRBM_SOFT_RESET, 0);
2799 RREG32(SRBM_SOFT_RESET);
2800 }
2801
2802 WREG32(RLC_CNTL, 0);
2803}
2804
2805static void r600_rlc_start(struct radeon_device *rdev)
2806{
2807 WREG32(RLC_CNTL, RLC_ENABLE);
2808}
2809
2810static int r600_rlc_init(struct radeon_device *rdev)
2811{
2812 u32 i;
2813 const __be32 *fw_data;
2814
2815 if (!rdev->rlc_fw)
2816 return -EINVAL;
2817
2818 r600_rlc_stop(rdev);
2819
2820 WREG32(RLC_HB_BASE, 0);
2821 WREG32(RLC_HB_CNTL, 0);
2822 WREG32(RLC_HB_RPTR, 0);
2823 WREG32(RLC_HB_WPTR, 0);
2824 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2825 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2826 WREG32(RLC_MC_CNTL, 0);
2827 WREG32(RLC_UCODE_CNTL, 0);
2828
2829 fw_data = (const __be32 *)rdev->rlc_fw->data;
Alex Deucher45f9a392010-03-24 13:55:51 -04002830 if (rdev->family >= CHIP_CEDAR) {
2831 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2832 WREG32(RLC_UCODE_ADDR, i);
2833 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2834 }
2835 } else if (rdev->family >= CHIP_RV770) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002836 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2837 WREG32(RLC_UCODE_ADDR, i);
2838 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2839 }
2840 } else {
2841 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2842 WREG32(RLC_UCODE_ADDR, i);
2843 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2844 }
2845 }
2846 WREG32(RLC_UCODE_ADDR, 0);
2847
2848 r600_rlc_start(rdev);
2849
2850 return 0;
2851}
2852
2853static void r600_enable_interrupts(struct radeon_device *rdev)
2854{
2855 u32 ih_cntl = RREG32(IH_CNTL);
2856 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2857
2858 ih_cntl |= ENABLE_INTR;
2859 ih_rb_cntl |= IH_RB_ENABLE;
2860 WREG32(IH_CNTL, ih_cntl);
2861 WREG32(IH_RB_CNTL, ih_rb_cntl);
2862 rdev->ih.enabled = true;
2863}
2864
Alex Deucher45f9a392010-03-24 13:55:51 -04002865void r600_disable_interrupts(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002866{
2867 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2868 u32 ih_cntl = RREG32(IH_CNTL);
2869
2870 ih_rb_cntl &= ~IH_RB_ENABLE;
2871 ih_cntl &= ~ENABLE_INTR;
2872 WREG32(IH_RB_CNTL, ih_rb_cntl);
2873 WREG32(IH_CNTL, ih_cntl);
2874 /* set rptr, wptr to 0 */
2875 WREG32(IH_RB_RPTR, 0);
2876 WREG32(IH_RB_WPTR, 0);
2877 rdev->ih.enabled = false;
2878 rdev->ih.wptr = 0;
2879 rdev->ih.rptr = 0;
2880}
2881
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002882static void r600_disable_interrupt_state(struct radeon_device *rdev)
2883{
2884 u32 tmp;
2885
Alex Deucher3555e532010-10-08 12:09:12 -04002886 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002887 WREG32(GRBM_INT_CNTL, 0);
2888 WREG32(DxMODE_INT_MASK, 0);
Alex Deucher6f34be52010-11-21 10:59:01 -05002889 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
2890 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002891 if (ASIC_IS_DCE3(rdev)) {
2892 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2893 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2894 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2895 WREG32(DC_HPD1_INT_CONTROL, tmp);
2896 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2897 WREG32(DC_HPD2_INT_CONTROL, tmp);
2898 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2899 WREG32(DC_HPD3_INT_CONTROL, tmp);
2900 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2901 WREG32(DC_HPD4_INT_CONTROL, tmp);
2902 if (ASIC_IS_DCE32(rdev)) {
2903 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04002904 WREG32(DC_HPD5_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002905 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04002906 WREG32(DC_HPD6_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002907 }
2908 } else {
2909 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2910 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2911 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04002912 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002913 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04002914 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002915 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
Alex Deucher5898b1f2010-03-24 13:57:29 -04002916 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05002917 }
2918}
2919
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002920int r600_irq_init(struct radeon_device *rdev)
2921{
2922 int ret = 0;
2923 int rb_bufsz;
2924 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2925
2926 /* allocate ring */
Jerome Glisse0c452492010-01-15 14:44:37 +01002927 ret = r600_ih_ring_alloc(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002928 if (ret)
2929 return ret;
2930
2931 /* disable irqs */
2932 r600_disable_interrupts(rdev);
2933
2934 /* init rlc */
2935 ret = r600_rlc_init(rdev);
2936 if (ret) {
2937 r600_ih_ring_fini(rdev);
2938 return ret;
2939 }
2940
2941 /* setup interrupt control */
2942 /* set dummy read address to ring address */
2943 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2944 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2945 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2946 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2947 */
2948 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2949 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2950 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2951 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2952
2953 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2954 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2955
2956 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2957 IH_WPTR_OVERFLOW_CLEAR |
2958 (rb_bufsz << 1));
Alex Deucher724c80e2010-08-27 18:25:25 -04002959
2960 if (rdev->wb.enabled)
2961 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
2962
2963 /* set the writeback address whether it's enabled or not */
2964 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
2965 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002966
2967 WREG32(IH_RB_CNTL, ih_rb_cntl);
2968
2969 /* set rptr, wptr to 0 */
2970 WREG32(IH_RB_RPTR, 0);
2971 WREG32(IH_RB_WPTR, 0);
2972
2973 /* Default settings for IH_CNTL (disabled at first) */
2974 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2975 /* RPTR_REARM only works if msi's are enabled */
2976 if (rdev->msi_enabled)
2977 ih_cntl |= RPTR_REARM;
2978
2979#ifdef __BIG_ENDIAN
2980 ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
2981#endif
2982 WREG32(IH_CNTL, ih_cntl);
2983
2984 /* force the active interrupt state to all disabled */
Alex Deucher45f9a392010-03-24 13:55:51 -04002985 if (rdev->family >= CHIP_CEDAR)
2986 evergreen_disable_interrupt_state(rdev);
2987 else
2988 r600_disable_interrupt_state(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002989
2990 /* enable irqs */
2991 r600_enable_interrupts(rdev);
2992
2993 return ret;
2994}
2995
Jerome Glisse0c452492010-01-15 14:44:37 +01002996void r600_irq_suspend(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002997{
Alex Deucher45f9a392010-03-24 13:55:51 -04002998 r600_irq_disable(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05002999 r600_rlc_stop(rdev);
Jerome Glisse0c452492010-01-15 14:44:37 +01003000}
3001
3002void r600_irq_fini(struct radeon_device *rdev)
3003{
3004 r600_irq_suspend(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003005 r600_ih_ring_fini(rdev);
3006}
3007
3008int r600_irq_set(struct radeon_device *rdev)
3009{
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003010 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3011 u32 mode_int = 0;
3012 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
Alex Deucher2031f772010-04-22 12:52:11 -04003013 u32 grbm_int_cntl = 0;
Christian Koenigf2594932010-04-10 03:13:16 +02003014 u32 hdmi1, hdmi2;
Alex Deucher6f34be52010-11-21 10:59:01 -05003015 u32 d1grph = 0, d2grph = 0;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003016
Jerome Glisse003e69f2010-01-07 15:39:14 +01003017 if (!rdev->irq.installed) {
Joe Perchesfce7d612010-10-30 21:08:30 +00003018 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
Jerome Glisse003e69f2010-01-07 15:39:14 +01003019 return -EINVAL;
3020 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003021 /* don't enable anything if the ih is disabled */
Jerome Glisse79c2bbc2010-01-15 14:44:38 +01003022 if (!rdev->ih.enabled) {
3023 r600_disable_interrupts(rdev);
3024 /* force the active interrupt state to all disabled */
3025 r600_disable_interrupt_state(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003026 return 0;
Jerome Glisse79c2bbc2010-01-15 14:44:38 +01003027 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003028
Christian Koenigf2594932010-04-10 03:13:16 +02003029 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003030 if (ASIC_IS_DCE3(rdev)) {
Christian Koenigf2594932010-04-10 03:13:16 +02003031 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003032 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3033 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3034 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3035 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3036 if (ASIC_IS_DCE32(rdev)) {
3037 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3038 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3039 }
3040 } else {
Christian Koenigf2594932010-04-10 03:13:16 +02003041 hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003042 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3043 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3044 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3045 }
3046
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003047 if (rdev->irq.sw_int) {
3048 DRM_DEBUG("r600_irq_set: sw int\n");
3049 cp_int_cntl |= RB_INT_ENABLE;
Alex Deucherd0f8a852010-09-04 05:04:34 -04003050 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003051 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003052 if (rdev->irq.crtc_vblank_int[0] ||
3053 rdev->irq.pflip[0]) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003054 DRM_DEBUG("r600_irq_set: vblank 0\n");
3055 mode_int |= D1MODE_VBLANK_INT_MASK;
3056 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003057 if (rdev->irq.crtc_vblank_int[1] ||
3058 rdev->irq.pflip[1]) {
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003059 DRM_DEBUG("r600_irq_set: vblank 1\n");
3060 mode_int |= D2MODE_VBLANK_INT_MASK;
3061 }
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003062 if (rdev->irq.hpd[0]) {
3063 DRM_DEBUG("r600_irq_set: hpd 1\n");
3064 hpd1 |= DC_HPDx_INT_EN;
3065 }
3066 if (rdev->irq.hpd[1]) {
3067 DRM_DEBUG("r600_irq_set: hpd 2\n");
3068 hpd2 |= DC_HPDx_INT_EN;
3069 }
3070 if (rdev->irq.hpd[2]) {
3071 DRM_DEBUG("r600_irq_set: hpd 3\n");
3072 hpd3 |= DC_HPDx_INT_EN;
3073 }
3074 if (rdev->irq.hpd[3]) {
3075 DRM_DEBUG("r600_irq_set: hpd 4\n");
3076 hpd4 |= DC_HPDx_INT_EN;
3077 }
3078 if (rdev->irq.hpd[4]) {
3079 DRM_DEBUG("r600_irq_set: hpd 5\n");
3080 hpd5 |= DC_HPDx_INT_EN;
3081 }
3082 if (rdev->irq.hpd[5]) {
3083 DRM_DEBUG("r600_irq_set: hpd 6\n");
3084 hpd6 |= DC_HPDx_INT_EN;
3085 }
Christian Koenigf2594932010-04-10 03:13:16 +02003086 if (rdev->irq.hdmi[0]) {
3087 DRM_DEBUG("r600_irq_set: hdmi 1\n");
3088 hdmi1 |= R600_HDMI_INT_EN;
3089 }
3090 if (rdev->irq.hdmi[1]) {
3091 DRM_DEBUG("r600_irq_set: hdmi 2\n");
3092 hdmi2 |= R600_HDMI_INT_EN;
3093 }
Alex Deucher2031f772010-04-22 12:52:11 -04003094 if (rdev->irq.gui_idle) {
3095 DRM_DEBUG("gui idle\n");
3096 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3097 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003098
3099 WREG32(CP_INT_CNTL, cp_int_cntl);
3100 WREG32(DxMODE_INT_MASK, mode_int);
Alex Deucher6f34be52010-11-21 10:59:01 -05003101 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3102 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
Alex Deucher2031f772010-04-22 12:52:11 -04003103 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
Christian Koenigf2594932010-04-10 03:13:16 +02003104 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003105 if (ASIC_IS_DCE3(rdev)) {
Christian Koenigf2594932010-04-10 03:13:16 +02003106 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003107 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3108 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3109 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3110 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3111 if (ASIC_IS_DCE32(rdev)) {
3112 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3113 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3114 }
3115 } else {
Christian Koenigf2594932010-04-10 03:13:16 +02003116 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003117 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3118 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3119 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3120 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003121
3122 return 0;
3123}
3124
Alex Deucher6f34be52010-11-21 10:59:01 -05003125static inline void r600_irq_ack(struct radeon_device *rdev)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003126{
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003127 u32 tmp;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003128
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003129 if (ASIC_IS_DCE3(rdev)) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003130 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3131 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3132 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003133 } else {
Alex Deucher6f34be52010-11-21 10:59:01 -05003134 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3135 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3136 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003137 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003138 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3139 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003140
Alex Deucher6f34be52010-11-21 10:59:01 -05003141 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3142 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3143 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3144 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3145 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003146 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003147 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003148 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003149 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003150 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003151 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003152 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
Alex Deucher6f34be52010-11-21 10:59:01 -05003153 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003154 if (ASIC_IS_DCE3(rdev)) {
3155 tmp = RREG32(DC_HPD1_INT_CONTROL);
3156 tmp |= DC_HPDx_INT_ACK;
3157 WREG32(DC_HPD1_INT_CONTROL, tmp);
3158 } else {
3159 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3160 tmp |= DC_HPDx_INT_ACK;
3161 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3162 }
3163 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003164 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003165 if (ASIC_IS_DCE3(rdev)) {
3166 tmp = RREG32(DC_HPD2_INT_CONTROL);
3167 tmp |= DC_HPDx_INT_ACK;
3168 WREG32(DC_HPD2_INT_CONTROL, tmp);
3169 } else {
3170 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3171 tmp |= DC_HPDx_INT_ACK;
3172 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3173 }
3174 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003175 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003176 if (ASIC_IS_DCE3(rdev)) {
3177 tmp = RREG32(DC_HPD3_INT_CONTROL);
3178 tmp |= DC_HPDx_INT_ACK;
3179 WREG32(DC_HPD3_INT_CONTROL, tmp);
3180 } else {
3181 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3182 tmp |= DC_HPDx_INT_ACK;
3183 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3184 }
3185 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003186 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003187 tmp = RREG32(DC_HPD4_INT_CONTROL);
3188 tmp |= DC_HPDx_INT_ACK;
3189 WREG32(DC_HPD4_INT_CONTROL, tmp);
3190 }
3191 if (ASIC_IS_DCE32(rdev)) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003192 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003193 tmp = RREG32(DC_HPD5_INT_CONTROL);
3194 tmp |= DC_HPDx_INT_ACK;
3195 WREG32(DC_HPD5_INT_CONTROL, tmp);
3196 }
Alex Deucher6f34be52010-11-21 10:59:01 -05003197 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003198 tmp = RREG32(DC_HPD6_INT_CONTROL);
3199 tmp |= DC_HPDx_INT_ACK;
3200 WREG32(DC_HPD6_INT_CONTROL, tmp);
3201 }
3202 }
Christian Koenigf2594932010-04-10 03:13:16 +02003203 if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3204 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3205 }
3206 if (ASIC_IS_DCE3(rdev)) {
3207 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3208 WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3209 }
3210 } else {
3211 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3212 WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3213 }
3214 }
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003215}
3216
3217void r600_irq_disable(struct radeon_device *rdev)
3218{
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003219 r600_disable_interrupts(rdev);
3220 /* Wait and acknowledge irq */
3221 mdelay(1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003222 r600_irq_ack(rdev);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003223 r600_disable_interrupt_state(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003224}
3225
3226static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3227{
3228 u32 wptr, tmp;
3229
Alex Deucher724c80e2010-08-27 18:25:25 -04003230 if (rdev->wb.enabled)
3231 wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
3232 else
3233 wptr = RREG32(IH_RB_WPTR);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003234
3235 if (wptr & RB_OVERFLOW) {
Jerome Glisse7924e5e2010-01-15 14:44:39 +01003236 /* When a ring buffer overflow happens, start parsing interrupts
3237 * from the oldest vector that has not been overwritten (wptr + 16).
3238 * Hopefully this allows us to catch up.
3239 */
3240 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3241 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3242 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003243 tmp = RREG32(IH_RB_CNTL);
3244 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3245 WREG32(IH_RB_CNTL, tmp);
3246 }
Jerome Glisse0c452492010-01-15 14:44:37 +01003247 return (wptr & rdev->ih.ptr_mask);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003248}
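/*
 * Illustration (not part of the driver): with a 64 KB IH ring, ptr_mask
 * is 0xFFFF, so a raw wptr of 0x1A010 returned by the hardware is masked
 * down to 0xA010 above; after an overflow the rptr restarts at
 * (wptr + 16) & ptr_mask, the oldest vector that has not yet been
 * overwritten.
 */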
3249
3250/* r600 IV Ring
3251 * Each IV ring entry is 128 bits:
3252 * [7:0] - interrupt source id
3253 * [31:8] - reserved
3254 * [59:32] - interrupt source data
3255 * [127:60] - reserved
3256 *
3257 * The basic interrupt vector entries
3258 * are decoded as follows:
3259 * src_id src_data description
3260 * 1 0 D1 Vblank
3261 * 1 1 D1 Vline
3262 * 5 0 D2 Vblank
3263 * 5 1 D2 Vline
3264 * 19 0 FP Hot plug detection A
3265 * 19 1 FP Hot plug detection B
3266 * 19 2 DAC A auto-detection
3267 * 19 3 DAC B auto-detection
Christian Koenigf2594932010-04-10 03:13:16 +02003268 * 21 4 HDMI block A
3269 * 21 5 HDMI block B
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003270 * 176 - CP_INT RB
3271 * 177 - CP_INT IB1
3272 * 178 - CP_INT IB2
3273 * 181 - EOP Interrupt
3274 * 233 - GUI Idle
3275 *
3276 * Note, these are based on r600 and may need to be
3277 * adjusted or added to on newer asics
3278 */
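/*
 * Decode sketch (illustration only, matching the layout above and the
 * loop in r600_irq_process() below):
 *
 *	ring_index = rptr / 4;
 *	src_id   = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
 *	src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 *
 * Dwords 2 and 3 of the entry are reserved, and the rptr then advances
 * by 16 bytes to the next vector.
 */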
3279
3280int r600_irq_process(struct radeon_device *rdev)
3281{
3282 u32 wptr = r600_get_ih_wptr(rdev);
3283 u32 rptr = rdev->ih.rptr;
3284 u32 src_id, src_data;
Alex Deucher6f34be52010-11-21 10:59:01 -05003285 u32 ring_index;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003286 unsigned long flags;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003287 bool queue_hotplug = false;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003288
3289 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
Jerome Glisse79c2bbc2010-01-15 14:44:38 +01003290 if (!rdev->ih.enabled)
3291 return IRQ_NONE;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003292
3293 spin_lock_irqsave(&rdev->ih.lock, flags);
3294
3295 if (rptr == wptr) {
3296 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3297 return IRQ_NONE;
3298 }
3299 if (rdev->shutdown) {
3300 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3301 return IRQ_NONE;
3302 }
3303
3304restart_ih:
3305 /* display interrupts */
Alex Deucher6f34be52010-11-21 10:59:01 -05003306 r600_irq_ack(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003307
3308 rdev->ih.wptr = wptr;
3309 while (rptr != wptr) {
3310 /* wptr/rptr are in bytes! */
3311 ring_index = rptr / 4;
Cédric Cano4eace7f2011-02-11 19:45:38 -05003312 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3313 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003314
3315 switch (src_id) {
3316 case 1: /* D1 vblank/vline */
3317 switch (src_data) {
3318 case 0: /* D1 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003319 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003320 if (rdev->irq.crtc_vblank_int[0]) {
3321 drm_handle_vblank(rdev->ddev, 0);
3322 rdev->pm.vblank_sync = true;
3323 wake_up(&rdev->irq.vblank_queue);
3324 }
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003325 if (rdev->irq.pflip[0])
3326 radeon_crtc_handle_flip(rdev, 0);
Alex Deucher6f34be52010-11-21 10:59:01 -05003327 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003328 DRM_DEBUG("IH: D1 vblank\n");
3329 }
3330 break;
3331 case 1: /* D1 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003332 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3333 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003334 DRM_DEBUG("IH: D1 vline\n");
3335 }
3336 break;
3337 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003338 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003339 break;
3340 }
3341 break;
3342 case 5: /* D2 vblank/vline */
3343 switch (src_data) {
3344 case 0: /* D2 vblank */
Alex Deucher6f34be52010-11-21 10:59:01 -05003345 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
Alex Deucher6f34be52010-11-21 10:59:01 -05003346 if (rdev->irq.crtc_vblank_int[1]) {
3347 drm_handle_vblank(rdev->ddev, 1);
3348 rdev->pm.vblank_sync = true;
3349 wake_up(&rdev->irq.vblank_queue);
3350 }
Mario Kleiner3e4ea742010-11-21 10:59:02 -05003351 if (rdev->irq.pflip[1])
3352 radeon_crtc_handle_flip(rdev, 1);
Alex Deucher6f34be52010-11-21 10:59:01 -05003353 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003354 DRM_DEBUG("IH: D2 vblank\n");
3355 }
3356 break;
3357 case 1: /* D2 vline */
Alex Deucher6f34be52010-11-21 10:59:01 -05003358 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3359 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003360 DRM_DEBUG("IH: D2 vline\n");
3361 }
3362 break;
3363 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003364 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003365 break;
3366 }
3367 break;
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003368 case 19: /* HPD/DAC hotplug */
3369 switch (src_data) {
3370 case 0:
Alex Deucher6f34be52010-11-21 10:59:01 -05003371 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3372 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003373 queue_hotplug = true;
3374 DRM_DEBUG("IH: HPD1\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003375 }
3376 break;
3377 case 1:
Alex Deucher6f34be52010-11-21 10:59:01 -05003378 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3379 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003380 queue_hotplug = true;
3381 DRM_DEBUG("IH: HPD2\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003382 }
3383 break;
3384 case 4:
Alex Deucher6f34be52010-11-21 10:59:01 -05003385 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3386 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003387 queue_hotplug = true;
3388 DRM_DEBUG("IH: HPD3\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003389 }
3390 break;
3391 case 5:
Alex Deucher6f34be52010-11-21 10:59:01 -05003392 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3393 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003394 queue_hotplug = true;
3395 DRM_DEBUG("IH: HPD4\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003396 }
3397 break;
3398 case 10:
Alex Deucher6f34be52010-11-21 10:59:01 -05003399 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3400 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003401 queue_hotplug = true;
3402 DRM_DEBUG("IH: HPD5\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003403 }
3404 break;
3405 case 12:
Alex Deucher6f34be52010-11-21 10:59:01 -05003406 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3407 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003408 queue_hotplug = true;
3409 DRM_DEBUG("IH: HPD6\n");
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003410 }
3411 break;
3412 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003413 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deuchere0df1ac2009-12-04 15:12:21 -05003414 break;
3415 }
3416 break;
Christian Koenigf2594932010-04-10 03:13:16 +02003417 case 21: /* HDMI */
3418 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3419 r600_audio_schedule_polling(rdev);
3420 break;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003421 case 176: /* CP_INT in ring buffer */
3422 case 177: /* CP_INT in IB1 */
3423 case 178: /* CP_INT in IB2 */
3424 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3425 radeon_fence_process(rdev);
3426 break;
3427 case 181: /* CP EOP event */
3428 DRM_DEBUG("IH: CP EOP\n");
Alex Deucherd0f8a852010-09-04 05:04:34 -04003429 radeon_fence_process(rdev);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003430 break;
Alex Deucher2031f772010-04-22 12:52:11 -04003431 case 233: /* GUI IDLE */
3432 DRM_DEBUG("IH: GUI idle\n");
3433 rdev->pm.gui_idle = true;
3434 wake_up(&rdev->irq.idle_queue);
3435 break;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003436 default:
Alex Deucherb0425892010-01-11 19:47:38 -05003437 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003438 break;
3439 }
3440
3441 /* wptr/rptr are in bytes! */
Jerome Glisse0c452492010-01-15 14:44:37 +01003442 rptr += 16;
3443 rptr &= rdev->ih.ptr_mask;
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003444 }
3445 /* make sure wptr hasn't changed while processing */
3446 wptr = r600_get_ih_wptr(rdev);
3447 if (wptr != rdev->ih.wptr)
3448 goto restart_ih;
Alex Deucherd4877cf2009-12-04 16:56:37 -05003449 if (queue_hotplug)
Tejun Heo32c87fc2011-01-03 14:49:32 +01003450 schedule_work(&rdev->hotplug_work);
Alex Deucherd8f60cf2009-12-01 13:43:46 -05003451 rdev->ih.rptr = rptr;
3452 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3453 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3454 return IRQ_HANDLED;
3455}
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003456
3457/*
3458 * Debugfs info
3459 */
3460#if defined(CONFIG_DEBUG_FS)
3461
3462static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3463{
3464 struct drm_info_node *node = (struct drm_info_node *) m->private;
3465 struct drm_device *dev = node->minor->dev;
3466 struct radeon_device *rdev = dev->dev_private;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003467 unsigned count, i, j;
3468
3469 radeon_ring_free_size(rdev);
Rafał Miłeckid6840762009-11-10 22:26:21 +01003470 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003471 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
Rafał Miłeckid6840762009-11-10 22:26:21 +01003472 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3473 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3474 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3475 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003476 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3477 seq_printf(m, "%u dwords in ring\n", count);
Rafał Miłeckid6840762009-11-10 22:26:21 +01003478 i = rdev->cp.rptr;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003479 for (j = 0; j <= count; j++) {
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003480 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
Rafał Miłeckid6840762009-11-10 22:26:21 +01003481 i = (i + 1) & rdev->cp.ptr_mask;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10003482 }
3483 return 0;
3484}
3485
3486static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3487{
3488 struct drm_info_node *node = (struct drm_info_node *) m->private;
3489 struct drm_device *dev = node->minor->dev;
3490 struct radeon_device *rdev = dev->dev_private;
3491
3492 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3493 DREG32_SYS(m, rdev, VM_L2_STATUS);
3494 return 0;
3495}
3496
3497static struct drm_info_list r600_mc_info_list[] = {
3498 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3499 {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3500};
3501#endif
3502
3503int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3504{
3505#if defined(CONFIG_DEBUG_FS)
3506 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3507#else
3508 return 0;
3509#endif
Jerome Glisse771fe6b2009-06-05 14:42:42 +02003510}
Jerome Glisse062b3892010-02-04 20:36:39 +01003511
3512/**
3513 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3514 * rdev: radeon device structure
3515 * bo: buffer object struct which userspace is waiting for idle
3516 *
3517 * Some R6XX/R7XX chips don't seem to honor an HDP flush performed
3518 * through the ring buffer, which leads to rendering corruption, see
3519 * http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
3520 * perform the HDP flush directly by writing the register through MMIO.
3521 */
3522void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3523{
Alex Deucher812d0462010-07-26 18:51:53 -04003524 /* r7xx hw bug: use a write to HDP_DEBUG1 followed by an fb read
Alex Deucherf3886f82010-12-08 10:05:34 -05003525 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
3526 * This seems to cause problems on some AGP cards, so just use the
3527 * old method for them.
Alex Deucher812d0462010-07-26 18:51:53 -04003528 */
Alex Deuchere4884592010-09-27 10:57:10 -04003529 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
Alex Deucherf3886f82010-12-08 10:05:34 -05003530 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
Alex Deucher87cbf8f2010-08-27 13:59:54 -04003531 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
Alex Deucher812d0462010-07-26 18:51:53 -04003532 u32 tmp;
3533
3534 WREG32(HDP_DEBUG1, 0);
3535 tmp = readl((void __iomem *)ptr);
3536 } else
3537 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
Jerome Glisse062b3892010-02-04 20:36:39 +01003538}
Alex Deucher3313e3d2011-01-06 18:49:34 -05003539
3540void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
3541{
3542 u32 link_width_cntl, mask, target_reg;
3543
3544 if (rdev->flags & RADEON_IS_IGP)
3545 return;
3546
3547 if (!(rdev->flags & RADEON_IS_PCIE))
3548 return;
3549
3550 /* x2 cards have a special sequence */
3551 if (ASIC_IS_X2(rdev))
3552 return;
3553
3554 /* FIXME wait for idle */
3555
3556 switch (lanes) {
3557 case 0:
3558 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
3559 break;
3560 case 1:
3561 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
3562 break;
3563 case 2:
3564 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
3565 break;
3566 case 4:
3567 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
3568 break;
3569 case 8:
3570 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
3571 break;
3572 case 12:
3573 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
3574 break;
3575 case 16:
3576 default:
3577 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
3578 break;
3579 }
3580
3581 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3582
3583 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
3584 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
3585 return;
3586
3587 if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
3588 return;
3589
3590 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
3591 RADEON_PCIE_LC_RECONFIG_NOW |
3592 R600_PCIE_LC_RENEGOTIATE_EN |
3593 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
3594 link_width_cntl |= mask;
3595
3596 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3597
3598 /* some northbridges can renegotiate the link rather than requiring
3599 * a complete re-config.
3600 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
3601 */
3602 if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
3603 link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
3604 else
3605 link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
3606
3607 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
3608 RADEON_PCIE_LC_RECONFIG_NOW));
3609
3610 if (rdev->family >= CHIP_RV770)
3611 target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
3612 else
3613 target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
3614
3615 /* wait for lane set to complete */
3616 link_width_cntl = RREG32(target_reg);
3617 while (link_width_cntl == 0xffffffff)
3618 link_width_cntl = RREG32(target_reg);
3619
3620}
3621
3622int r600_get_pcie_lanes(struct radeon_device *rdev)
3623{
3624 u32 link_width_cntl;
3625
3626 if (rdev->flags & RADEON_IS_IGP)
3627 return 0;
3628
3629 if (!(rdev->flags & RADEON_IS_PCIE))
3630 return 0;
3631
3632 /* x2 cards have a special sequence */
3633 if (ASIC_IS_X2(rdev))
3634 return 0;
3635
3636 /* FIXME wait for idle */
3637
3638 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3639
3640 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
3641 case RADEON_PCIE_LC_LINK_WIDTH_X0:
3642 return 0;
3643 case RADEON_PCIE_LC_LINK_WIDTH_X1:
3644 return 1;
3645 case RADEON_PCIE_LC_LINK_WIDTH_X2:
3646 return 2;
3647 case RADEON_PCIE_LC_LINK_WIDTH_X4:
3648 return 4;
3649 case RADEON_PCIE_LC_LINK_WIDTH_X8:
3650 return 8;
3651 case RADEON_PCIE_LC_LINK_WIDTH_X16:
3652 default:
3653 return 16;
3654 }
3655}
3656
Alex Deucher9e46a482011-01-06 18:49:35 -05003657static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3658{
3659 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3660 u16 link_cntl2;
3661
Alex Deucherd42dd572011-01-12 20:05:11 -05003662 if (radeon_pcie_gen2 == 0)
3663 return;
3664
Alex Deucher9e46a482011-01-06 18:49:35 -05003665 if (rdev->flags & RADEON_IS_IGP)
3666 return;
3667
3668 if (!(rdev->flags & RADEON_IS_PCIE))
3669 return;
3670
3671 /* x2 cards have a special sequence */
3672 if (ASIC_IS_X2(rdev))
3673 return;
3674
3675 /* only RV6xx+ chips are supported */
3676 if (rdev->family <= CHIP_R600)
3677 return;
3678
3679 /* 55 nm r6xx asics */
3680 if ((rdev->family == CHIP_RV670) ||
3681 (rdev->family == CHIP_RV620) ||
3682 (rdev->family == CHIP_RV635)) {
3683 /* advertise upconfig capability */
3684 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3685 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3686 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3687 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3688 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
3689 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
3690 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
3691 LC_RECONFIG_ARC_MISSING_ESCAPE);
3692 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
3693 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3694 } else {
3695 link_width_cntl |= LC_UPCONFIGURE_DIS;
3696 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3697 }
3698 }
3699
3700 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3701 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3702 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3703
3704 /* 55 nm r6xx asics */
3705 if ((rdev->family == CHIP_RV670) ||
3706 (rdev->family == CHIP_RV620) ||
3707 (rdev->family == CHIP_RV635)) {
3708 WREG32(MM_CFGREGS_CNTL, 0x8);
3709 link_cntl2 = RREG32(0x4088);
3710 WREG32(MM_CFGREGS_CNTL, 0);
3711 /* not supported yet */
3712 if (link_cntl2 & SELECTABLE_DEEMPHASIS)
3713 return;
3714 }
3715
3716 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
3717 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
3718 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
3719 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
3720 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
3721 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3722
3723 tmp = RREG32(0x541c);
3724 WREG32(0x541c, tmp | 0x8);
3725 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
3726 link_cntl2 = RREG16(0x4088);
3727 link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
3728 link_cntl2 |= 0x2;
3729 WREG16(0x4088, link_cntl2);
3730 WREG32(MM_CFGREGS_CNTL, 0);
3731
3732 if ((rdev->family == CHIP_RV670) ||
3733 (rdev->family == CHIP_RV620) ||
3734 (rdev->family == CHIP_RV635)) {
3735 training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
3736 training_cntl &= ~LC_POINT_7_PLUS_EN;
3737 WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
3738 } else {
3739 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3740 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3741 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3742 }
3743
3744 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3745 speed_cntl |= LC_GEN2_EN_STRAP;
3746 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3747
3748 } else {
3749 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3750 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3751 if (1)
3752 link_width_cntl |= LC_UPCONFIGURE_DIS;
3753 else
3754 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3755 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3756 }
3757}