Merge remote branch 'korg/drm-radeon-testing' into drm-next-stage
author     Dave Airlie <airlied@redhat.com>
           Thu, 25 Feb 2010 03:44:04 +0000 (13:44 +1000)
committer  Dave Airlie <airlied@redhat.com>
           Fri, 26 Feb 2010 06:23:23 +0000 (16:23 +1000)
* korg/drm-radeon-testing: (62 commits)
  drm/radeon/kms: update new pll algo
  drm/radeon/kms: add support for square microtiles on r3xx-r5xx
  drm/radeon/kms: force pinning buffer into visible VRAM
  drm/radeon/kms/evergreen: fix typo in cursor code
  drm/radeon/kms: implement reading active PCIE lanes on R600+
  drm/radeon/kms: for downclocking non-mobility check PERFORMANCE state
  drm/radeon/kms: simplify storing current and requested PM mode
  drm/radeon: fixes for r6xx/r7xx gfx init
  drm/radeon/rv740: fix backend setup
  drm/radeon/kms: fix R3XX/R4XX memory controller initialization
  [rfc] drm/radeon/kms: pm debugging check for vbl.
  drm/radeon: Fix memory allocation failures in the preKMS command stream checking.
  drm: Add generic multipart buffer.
  drm/radeon/kms: simplify memory controller setup V2
  drm/radeon: Add asic hook for dma copy to r200 cards.
  drm/radeon/kms: Create asic structure for r300 pcie cards.
  drm/radeon/kms: remove unused r600_gart_clear_page
  drm/radeon/kms: remove HDP flushes from fence emit (v2)
  drm/radeon/kms: add LVDS pll quirk for Dell Studio 15
  drm/radeon/kms: simplify picking power state
  ...

Conflicts:
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/rv770.c

13 files changed:
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_audio.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/gpu/drm/radeon/radeon_ttm.c

diff --combined drivers/gpu/drm/drm_edid.c
index ab6c973304129383ca733eeb2f49b117b55b421a,f41e91ceaea67547ceea6e186e961e0e4ca32399..f97e7c42ac8e1f7bdf53573e491feb0a80f2989f
@@@ -60,8 -60,7 +60,7 @@@
  #define EDID_QUIRK_FIRST_DETAILED_PREFERRED   (1 << 5)
  /* use +hsync +vsync for detailed mode */
  #define EDID_QUIRK_DETAILED_SYNC_PP           (1 << 6)
- /* define the number of Extension EDID block */
- #define MAX_EDID_EXT_NUM 4
  
  #define LEVEL_DMT     0
  #define LEVEL_GTF     1
@@@ -114,14 -113,14 +113,14 @@@ static const u8 edid_header[] = 
  };
  
  /**
-  * edid_is_valid - sanity check EDID data
+  * drm_edid_is_valid - sanity check EDID data
   * @edid: EDID data
   *
   * Sanity check the EDID block by looking at the header, the version number
   * and the checksum.  Return 0 if the EDID doesn't check out, or 1 if it's
   * valid.
   */
- static bool edid_is_valid(struct edid *edid)
+ bool drm_edid_is_valid(struct edid *edid)
  {
        int i, score = 0;
        u8 csum = 0;
@@@ -163,6 -162,7 +162,7 @@@ bad
        }
        return 0;
  }
+ EXPORT_SYMBOL(drm_edid_is_valid);
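/*
 * Illustrative aside, not part of the patch above: with the helper now
 * exported, a driver that holds its own EDID blob can sanity-check it
 * before trusting it, e.g.:
 *
 *	if (!drm_edid_is_valid(edid)) {
 *		kfree(edid);
 *		edid = NULL;
 *	}
 *
 * (The surrounding blob ownership and free logic here is hypothetical.)
 */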
  
  /**
   * edid_vendor - match a string against EDID's obfuscated vendor field
@@@ -598,50 -598,6 +598,50 @@@ struct drm_display_mode *drm_mode_std(s
        return mode;
  }
  
 +/*
 + * EDID is delightfully ambiguous about how interlaced modes are to be
 + * encoded.  Our internal representation is of frame height, but some
 + * HDTV detailed timings are encoded as field height.
 + *
 + * The format list here is from CEA, in frame size.  Technically we
 + * should be checking refresh rate too.  Whatever.
 + */
 +static void
 +drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
 +                          struct detailed_pixel_timing *pt)
 +{
 +      int i;
 +      static const struct {
 +              int w, h;
 +      } cea_interlaced[] = {
 +              { 1920, 1080 },
 +              {  720,  480 },
 +              { 1440,  480 },
 +              { 2880,  480 },
 +              {  720,  576 },
 +              { 1440,  576 },
 +              { 2880,  576 },
 +      };
 +      static const int n_sizes =
 +              sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
 +
 +      if (!(pt->misc & DRM_EDID_PT_INTERLACED))
 +              return;
 +
 +      for (i = 0; i < n_sizes; i++) {
 +              if ((mode->hdisplay == cea_interlaced[i].w) &&
 +                  (mode->vdisplay == cea_interlaced[i].h / 2)) {
 +                      mode->vdisplay *= 2;
 +                      mode->vsync_start *= 2;
 +                      mode->vsync_end *= 2;
 +                      mode->vtotal *= 2;
 +                      mode->vtotal |= 1;
 +              }
 +      }
 +
 +      mode->flags |= DRM_MODE_FLAG_INTERLACE;
 +}
 +
  /**
   * drm_mode_detailed - create a new mode from an EDID detailed timing section
   * @dev: DRM device (needed to create new mode)
@@@ -724,7 -680,8 +724,7 @@@ static struct drm_display_mode *drm_mod
  
        drm_mode_set_name(mode);
  
 -      if (pt->misc & DRM_EDID_PT_INTERLACED)
 -              mode->flags |= DRM_MODE_FLAG_INTERLACE;
 +      drm_mode_do_interlace_quirk(mode, pt);
  
        if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
@@@ -1112,8 -1069,8 +1112,8 @@@ static int add_detailed_info_eedid(stru
        }
  
        /* Chose real EDID extension number */
-       edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
-                      MAX_EDID_EXT_NUM : edid->extensions;
+       edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
+               DRM_MAX_EDID_EXT_NUM : edid->extensions;
  
        /* Find CEA extension */
        for (i = 0; i < edid_ext_num; i++) {
@@@ -1195,7 -1152,7 +1195,7 @@@ static int drm_ddc_read_edid(struct drm
        for (i = 0; i < 4; i++) {
                if (drm_do_probe_ddc_edid(adapter, buf, len))
                        return -1;
-               if (edid_is_valid((struct edid *)buf))
+               if (drm_edid_is_valid((struct edid *)buf))
                        return 0;
        }
  
@@@ -1220,7 -1177,7 +1220,7 @@@ struct edid *drm_get_edid(struct drm_co
        int ret;
        struct edid *edid;
  
-       edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
+       edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
                       GFP_KERNEL);
        if (edid == NULL) {
                dev_warn(&connector->dev->pdev->dev,
        if (edid->extensions != 0) {
                int edid_ext_num = edid->extensions;
  
-               if (edid_ext_num > MAX_EDID_EXT_NUM) {
+               if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
                        dev_warn(&connector->dev->pdev->dev,
                                 "The number of extension(%d) is "
                                 "over max (%d), actually read number (%d)\n",
-                                edid_ext_num, MAX_EDID_EXT_NUM,
-                                MAX_EDID_EXT_NUM);
+                                edid_ext_num, DRM_MAX_EDID_EXT_NUM,
+                                DRM_MAX_EDID_EXT_NUM);
                        /* Reset EDID extension number to be read */
-                       edid_ext_num = MAX_EDID_EXT_NUM;
+                       edid_ext_num = DRM_MAX_EDID_EXT_NUM;
                }
                /* Read EDID including extensions too */
                ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
@@@ -1288,8 -1245,8 +1288,8 @@@ bool drm_detect_hdmi_monitor(struct edi
                goto end;
  
        /* Chose real EDID extension number */
-       edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
-                      MAX_EDID_EXT_NUM : edid->extensions;
+       edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
+                      DRM_MAX_EDID_EXT_NUM : edid->extensions;
  
        /* Find CEA extension */
        for (i = 0; i < edid_ext_num; i++) {
@@@ -1346,7 -1303,7 +1346,7 @@@ int drm_add_edid_modes(struct drm_conne
        if (edid == NULL) {
                return 0;
        }
-       if (!edid_is_valid(edid)) {
+       if (!drm_edid_is_valid(edid)) {
                dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
                         drm_get_connector_name(connector));
                return 0;
diff --combined drivers/gpu/drm/radeon/atombios_dp.c
index 99915a682d593deb4b4aabaf2cd81fd902bc9103,0b6f2cef1c52aef505cfe0874df70778f85dccf1..8a133bda00a25b092e764f1eec2bca80c57cb569
@@@ -321,6 -321,10 +321,10 @@@ static void dp_get_adjust_train(u8 link
                train_set[lane] = v | p;
  }
  
+ union aux_channel_transaction {
+       PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+       PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+ };
  
  /* radeon aux chan functions */
  bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
  {
        struct drm_device *dev = chan->dev;
        struct radeon_device *rdev = dev->dev_private;
-       PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+       union aux_channel_transaction args;
        int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
        unsigned char *base;
 +      int retry_count = 0;
  
        memset(&args, 0, sizeof(args));
  
        base = (unsigned char *)rdev->mode_info.atom_context->scratch;
  
 +retry:
        memcpy(base, req_bytes, num_bytes);
  
-       args.lpAuxRequest = 0;
-       args.lpDataOut = 16;
-       args.ucDataOutLen = 0;
-       args.ucChannelID = chan->rec.i2c_id;
-       args.ucDelay = delay / 10;
+       args.v1.lpAuxRequest = 0;
+       args.v1.lpDataOut = 16;
+       args.v1.ucDataOutLen = 0;
+       args.v1.ucChannelID = chan->rec.i2c_id;
+       args.v1.ucDelay = delay / 10;
+       if (ASIC_IS_DCE4(rdev))
+               args.v2.ucHPD_ID = chan->rec.hpd_id;
  
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
  
-       if (args.ucReplyStatus && !args.ucDataOutLen) {
-               if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
 -      if (args.v1.ucReplyStatus) {
 -              DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
++      if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
++              if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
 +                      goto retry;
 +              DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
                          req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
-                         chan->rec.i2c_id, args.ucReplyStatus, retry_count);
 -                        chan->rec.i2c_id, args.v1.ucReplyStatus);
++                        chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
                return false;
        }
  
-       if (args.ucDataOutLen && read_byte && read_buf_len) {
-               if (read_buf_len < args.ucDataOutLen) {
+       if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
+               if (read_buf_len < args.v1.ucDataOutLen) {
                        DRM_ERROR("Buffer to small for return answer %d %d\n",
-                                 read_buf_len, args.ucDataOutLen);
+                                 read_buf_len, args.v1.ucDataOutLen);
                        return false;
                }
                {
-                       int len = min(read_buf_len, args.ucDataOutLen);
+                       int len = min(read_buf_len, args.v1.ucDataOutLen);
                        memcpy(read_byte, base + 16, len);
                }
        }
@@@ -626,12 -628,19 +632,19 @@@ void dp_link_train(struct drm_encoder *
        dp_set_link_bw_lanes(radeon_connector, link_configuration);
        /* disable downspread on the sink */
        dp_set_downspread(radeon_connector, 0);
-       /* start training on the source */
-       radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
-                                 dig_connector->dp_clock, enc_id, 0);
-       /* set training pattern 1 on the source */
-       radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
-                                 dig_connector->dp_clock, enc_id, 0);
+       if (ASIC_IS_DCE4(rdev)) {
+               /* start training on the source */
+               atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
+               /* set training pattern 1 on the source */
+               atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
+       } else {
+               /* start training on the source */
+               radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+                                         dig_connector->dp_clock, enc_id, 0);
+               /* set training pattern 1 on the source */
+               radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+                                         dig_connector->dp_clock, enc_id, 0);
+       }
  
        /* set initial vs/emph */
        memset(train_set, 0, 4);
        /* set training pattern 2 on the sink */
        dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
        /* set training pattern 2 on the source */
-       radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
-                                 dig_connector->dp_clock, enc_id, 1);
+       if (ASIC_IS_DCE4(rdev))
+               atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
+       else
+               radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+                                         dig_connector->dp_clock, enc_id, 1);
  
        /* channel equalization loop */
        tries = 0;
                          >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
  
        /* disable the training pattern on the sink */
-       dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+       if (ASIC_IS_DCE4(rdev))
+               atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
+       else
+               radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+                                         dig_connector->dp_clock, enc_id, 0);
  
        radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
                                  dig_connector->dp_clock, enc_id, 0);
diff --combined drivers/gpu/drm/radeon/r600.c
index 2ffcf5a03551e94b5fdbc32d3dca1eeae8c9ba67,f9a83358aa5a21201ed2a5fff338f319ec43ad00..c52290197292a1a958d05dcd3896587055542b8b
@@@ -353,23 -353,14 +353,14 @@@ void r600_hpd_fini(struct radeon_devic
  /*
   * R600 PCIE GART
   */
- int r600_gart_clear_page(struct radeon_device *rdev, int i)
- {
-       void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
-       u64 pte;
-       if (i < 0 || i > rdev->gart.num_gpu_pages)
-               return -EINVAL;
-       pte = 0;
-       writeq(pte, ((void __iomem *)ptr) + (i * 8));
-       return 0;
- }
  void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
  {
        unsigned i;
        u32 tmp;
  
+       /* flush hdp cache so updates hit vram */
+       WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
        WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
        WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
        WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
@@@ -416,6 -407,7 +407,7 @@@ int r600_pcie_gart_enable(struct radeon
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
+       radeon_gart_restore(rdev);
  
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@@ -619,6 -611,68 +611,68 @@@ static void r600_mc_program(struct rade
        rv515_vga_render_disable(rdev);
  }
  
+ /**
+  * r600_vram_gtt_location - try to find VRAM & GTT location
+  * @rdev: radeon device structure holding all necessary information
+  * @mc: memory controller structure holding memory information
+  *
+  * Try to place VRAM at the same address it occupies in the CPU (PCI)
+  * address space, as some GPUs misbehave when VRAM is reprogrammed to a
+  * different address.
+  *
+  * If there is not enough space to fit the non-visible VRAM after the
+  * aperture, limit the VRAM size to the aperture.
+  *
+  * If AGP is in use, place VRAM adjacent to the AGP aperture; the GPU needs
+  * the two ranges to be contiguous from its point of view so that it can be
+  * programmed to catch accesses outside of them.
+  *
+  * This function never fails; in the worst case VRAM or GTT is limited.
+  *
+  * Note: GTT start, end and size should be initialized before calling this
+  * function on AGP platforms.
+  */
+ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+ {
+       u64 size_bf, size_af;
+       if (mc->mc_vram_size > 0xE0000000) {
+               /* leave room for at least 512M GTT */
+               dev_warn(rdev->dev, "limiting VRAM\n");
+               mc->real_vram_size = 0xE0000000;
+               mc->mc_vram_size = 0xE0000000;
+       }
+       if (rdev->flags & RADEON_IS_AGP) {
+               size_bf = mc->gtt_start;
+               size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+               if (size_bf > size_af) {
+                       if (mc->mc_vram_size > size_bf) {
+                               dev_warn(rdev->dev, "limiting VRAM\n");
+                               mc->real_vram_size = size_bf;
+                               mc->mc_vram_size = size_bf;
+                       }
+                       mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+               } else {
+                       if (mc->mc_vram_size > size_af) {
+                               dev_warn(rdev->dev, "limiting VRAM\n");
+                               mc->real_vram_size = size_af;
+                               mc->mc_vram_size = size_af;
+                       }
+                       mc->vram_start = mc->gtt_end;
+               }
+               mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+               dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+                               mc->mc_vram_size >> 20, mc->vram_start,
+                               mc->vram_end, mc->real_vram_size >> 20);
+       } else {
+               u64 base = 0;
+               if (rdev->flags & RADEON_IS_IGP)
+                       base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+               radeon_vram_location(rdev, &rdev->mc, base);
+               radeon_gtt_location(rdev, mc);
+       }
+ }
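/*
 * Illustrative aside with hypothetical numbers, not part of the patch above:
 * for the AGP branch, an aperture at 0xD0000000-0xDFFFFFFF gives
 * size_bf = 0xD0000000 and size_af = 0xFFFFFFFF - 0xDFFFFFFF + 1 = 0x20000000,
 * so the gap before the aperture wins and a 512MB (0x20000000) card gets
 * vram_start = 0xD0000000 - 0x20000000 = 0xB0000000 and
 * vram_end = 0xB0000000 + 0x20000000 - 1 = 0xCFFFFFFF, directly below GTT.
 */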
  int r600_mc_init(struct radeon_device *rdev)
  {
        fixed20_12 a;
        /* Setup GPU memory space */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-       if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+       rdev->mc.visible_vram_size = rdev->mc.aper_size;
+       /* FIXME remove this once we support unmappable VRAM */
+       if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
                rdev->mc.mc_vram_size = rdev->mc.aper_size;
-       if (rdev->mc.real_vram_size > rdev->mc.aper_size)
                rdev->mc.real_vram_size = rdev->mc.aper_size;
-       if (rdev->flags & RADEON_IS_AGP) {
-               /* gtt_size is setup by radeon_agp_init */
-               rdev->mc.gtt_location = rdev->mc.agp_base;
-               tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
-               /* Try to put vram before or after AGP because we
-                * we want SYSTEM_APERTURE to cover both VRAM and
-                * AGP so that GPU can catch out of VRAM/AGP access
-                */
-               if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
-                       /* Enough place before */
-                       rdev->mc.vram_location = rdev->mc.gtt_location -
-                                                       rdev->mc.mc_vram_size;
-               } else if (tmp > rdev->mc.mc_vram_size) {
-                       /* Enough place after */
-                       rdev->mc.vram_location = rdev->mc.gtt_location +
-                                                       rdev->mc.gtt_size;
-               } else {
-                       /* Try to setup VRAM then AGP might not
-                        * not work on some card
-                        */
-                       rdev->mc.vram_location = 0x00000000UL;
-                       rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-               }
-       } else {
-               rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-               rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
-                                                       0xFFFF) << 24;
-               tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
-               if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
-                       /* Enough place after vram */
-                       rdev->mc.gtt_location = tmp;
-               } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
-                       /* Enough place before vram */
-                       rdev->mc.gtt_location = 0;
-               } else {
-                       /* Not enough place after or before shrink
-                        * gart size
-                        */
-                       if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
-                               rdev->mc.gtt_location = 0;
-                               rdev->mc.gtt_size = rdev->mc.vram_location;
-                       } else {
-                               rdev->mc.gtt_location = tmp;
-                               rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
-                       }
-               }
-               rdev->mc.gtt_location = rdev->mc.mc_vram_size;
        }
-       rdev->mc.vram_start = rdev->mc.vram_location;
-       rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-       rdev->mc.gtt_start = rdev->mc.gtt_location;
-       rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+       r600_vram_gtt_location(rdev, &rdev->mc);
        /* FIXME: we should enforce default clock in case GPU is not in
         * default setup
         */
        a.full = rfixed_const(100);
        rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
        rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
        if (rdev->flags & RADEON_IS_IGP)
                rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
        return 0;
  }
  
@@@ -981,6 -981,9 +981,9 @@@ void r600_gpu_init(struct radeon_devic
  {
        u32 tiling_config;
        u32 ramcfg;
+       u32 backend_map;
+       u32 cc_rb_backend_disable;
+       u32 cc_gc_shader_pipe_config;
        u32 tmp;
        int i, j;
        u32 sq_config;
        default:
                break;
        }
+       rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+       rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= GROUP_SIZE(0);
+       rdev->config.r600.tiling_group_size = 256;
        tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
        if (tmp > 3) {
                tiling_config |= ROW_TILING(3);
                tiling_config |= SAMPLE_SPLIT(tmp);
        }
        tiling_config |= BANK_SWAPS(1);
-       tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
-                                               rdev->config.r600.max_backends,
-                                               (0xff << rdev->config.r600.max_backends) & 0xff);
-       tiling_config |= BACKEND_MAP(tmp);
+       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+       cc_rb_backend_disable |=
+               BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
+       cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+       cc_gc_shader_pipe_config |=
+               INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
+       cc_gc_shader_pipe_config |=
+               INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
+       backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
+                                                       (R6XX_MAX_BACKENDS -
+                                                        r600_count_pipe_bits((cc_rb_backend_disable &
+                                                                              R6XX_MAX_BACKENDS_MASK) >> 16)),
+                                                       (cc_rb_backend_disable >> 16));
+       tiling_config |= BACKEND_MAP(backend_map);
        WREG32(GB_TILING_CONFIG, tiling_config);
        WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
        WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
  
-       tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
-       WREG32(CC_RB_BACKEND_DISABLE, tmp);
        /* Setup pipes */
-       tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
-       tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
-       WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
-       WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
+       WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+       WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
  
-       tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
+       tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
        WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
  
@@@ -1783,12 -1798,17 +1798,17 @@@ void r600_fence_ring_emit(struct radeon
                          struct radeon_fence *fence)
  {
        /* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */
+       radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+       radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
+       /* wait for 3D idle clean */
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+       radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
        radeon_ring_write(rdev, fence->seq);
-       radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
-       radeon_ring_write(rdev, 1);
        /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
        radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
        radeon_ring_write(rdev, RB_INT_STAT);
@@@ -1950,13 -1970,6 +1970,13 @@@ int r600_resume(struct radeon_device *r
                DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                return r;
        }
 +
 +      r = r600_audio_init(rdev);
 +      if (r) {
 +              DRM_ERROR("radeon: audio resume failed\n");
 +              return r;
 +      }
 +
        return r;
  }
  
@@@ -1964,7 -1977,6 +1984,7 @@@ int r600_suspend(struct radeon_device *
  {
        int r;
  
 +      r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        r600_cp_stop(rdev);
        rdev->cp.ready = false;
@@@ -2745,6 -2757,7 +2765,7 @@@ restart_ih
                        case 0: /* D1 vblank */
                                if (disp_int & LB_D1_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 0);
+                                       wake_up(&rdev->irq.vblank_queue);
                                        disp_int &= ~LB_D1_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D1 vblank\n");
                                }
                        case 0: /* D2 vblank */
                                if (disp_int & LB_D2_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 1);
+                                       wake_up(&rdev->irq.vblank_queue);
                                        disp_int &= ~LB_D2_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D2 vblank\n");
                                }
diff --combined drivers/gpu/drm/radeon/r600_audio.c
index 0dcb6904c4ff930bbaaa4bc81e2da930ecdc6af8,d7f6909afc0180b12277c5c1c4cd2217121ae6fd..387abaa275a41e39d7e347ddd14e4f906d1b3046
@@@ -146,6 -146,15 +146,15 @@@ static void r600_audio_update_hdmi(unsi
                jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
  }
  
+ /*
+  * turn on/off audio engine
+  */
+ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+ {
+       DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling");
+       WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+ }
  /*
   * initialize the audio vars and register the update timer
   */
@@@ -154,8 -163,7 +163,7 @@@ int r600_audio_init(struct radeon_devic
        if (!r600_audio_chipset_supported(rdev))
                return 0;
  
-       DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
-       WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
+       r600_audio_engine_enable(rdev, radeon_audio);
  
        rdev->audio_channels = -1;
        rdev->audio_rate = -1;
@@@ -262,5 -270,6 +270,7 @@@ void r600_audio_fini(struct radeon_devi
                return;
  
        del_timer(&rdev->audio_timer);
 +      WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
+       r600_audio_engine_enable(rdev, false);
  }
diff --combined drivers/gpu/drm/radeon/radeon_atombios.c
index 4d8831548a5fc6f6b8e6f1522836853cf2e10832,6f8619cd1a0d984da13a2fd47483279ddbd90a2a..93783b15c81d8226d9833c730af74250d8e3fb5e
@@@ -159,8 -159,15 +159,15 @@@ static struct radeon_hpd radeon_atom_ge
                                                            struct radeon_gpio_rec *gpio)
  {
        struct radeon_hpd hpd;
+       u32 reg;
+       if (ASIC_IS_DCE4(rdev))
+               reg = EVERGREEN_DC_GPIO_HPD_A;
+       else
+               reg = AVIVO_DC_GPIO_HPD_A;
        hpd.gpio = *gpio;
-       if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+       if (gpio->reg == reg) {
                switch(gpio->mask) {
                case (1 << 0):
                        hpd.hpd = RADEON_HPD_1;
@@@ -206,15 -213,6 +213,15 @@@ static bool radeon_atom_apply_quirks(st
                        *connector_type = DRM_MODE_CONNECTOR_DVID;
        }
  
 +      /* Asrock RS600 board lists the DVI port as HDMI */
 +      if ((dev->pdev->device == 0x7941) &&
 +          (dev->pdev->subsystem_vendor == 0x1849) &&
 +          (dev->pdev->subsystem_device == 0x7941)) {
 +              if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
 +                  (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
 +                      *connector_type = DRM_MODE_CONNECTOR_DVID;
 +      }
 +
        /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
        if ((dev->pdev->device == 0x7941) &&
            (dev->pdev->subsystem_vendor == 0x147b) &&
                        *connector_type = DRM_MODE_CONNECTOR_DVID;
        }
  
 +      /* XFX Pine Group device rv730 reports no VGA DDC lines
 +       * even though they are wired up to record 0x93
 +       */
 +      if ((dev->pdev->device == 0x9498) &&
 +          (dev->pdev->subsystem_vendor == 0x1682) &&
 +          (dev->pdev->subsystem_device == 0x2452)) {
 +              struct radeon_device *rdev = dev->dev_private;
 +              *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
 +      }
        return true;
  }
  
@@@ -574,6 -563,9 +581,9 @@@ bool radeon_get_atom_connector_info_fro
                                ddc_bus.valid = false;
                        }
  
+                       /* needed for aux chan transactions */
+                       ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
                        conn_id = le16_to_cpu(path->usConnObjectId);
  
                        if (!radeon_atom_apply_quirks
@@@ -838,6 -830,7 +848,7 @@@ union firmware_info 
        ATOM_FIRMWARE_INFO_V1_2 info_12;
        ATOM_FIRMWARE_INFO_V1_3 info_13;
        ATOM_FIRMWARE_INFO_V1_4 info_14;
+       ATOM_FIRMWARE_INFO_V2_1 info_21;
  };
  
  bool radeon_atom_get_clock_info(struct drm_device *dev)
        uint8_t frev, crev;
        struct radeon_pll *p1pll = &rdev->clock.p1pll;
        struct radeon_pll *p2pll = &rdev->clock.p2pll;
+       struct radeon_pll *dcpll = &rdev->clock.dcpll;
        struct radeon_pll *spll = &rdev->clock.spll;
        struct radeon_pll *mpll = &rdev->clock.mpll;
        uint16_t data_offset;
                rdev->clock.default_mclk =
                    le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
  
+               if (ASIC_IS_DCE4(rdev)) {
+                       rdev->clock.default_dispclk =
+                               le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
+                       if (rdev->clock.default_dispclk == 0)
+                               rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+                       rdev->clock.dp_extclk =
+                               le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+               }
+               *dcpll = *p1pll;
                return true;
        }
        return false;
  }
  
@@@ -1091,6 -1096,30 +1114,30 @@@ static struct radeon_atom_ss *radeon_at
        return ss;
  }
  
+ static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
+                                         struct radeon_encoder_atom_dig *lvds)
+ {
+       /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
+       if ((dev->pdev->device == 0x95c4) &&
+           (dev->pdev->subsystem_vendor == 0x1179) &&
+           (dev->pdev->subsystem_device == 0xff50)) {
+               if ((lvds->native_mode.hdisplay == 1280) &&
+                   (lvds->native_mode.vdisplay == 800))
+                       lvds->pll_algo = PLL_ALGO_LEGACY;
+       }
+       /* Dell Studio 15 laptop panel doesn't like new pll divider algo */
+       if ((dev->pdev->device == 0x95c4) &&
+           (dev->pdev->subsystem_vendor == 0x1028) &&
+           (dev->pdev->subsystem_device == 0x029f)) {
+               if ((lvds->native_mode.hdisplay == 1280) &&
+                   (lvds->native_mode.vdisplay == 800))
+                       lvds->pll_algo = PLL_ALGO_LEGACY;
+       }
+ }
  union lvds_info {
        struct _ATOM_LVDS_INFO info;
        struct _ATOM_LVDS_INFO_V12 info_12;
@@@ -1161,6 -1190,21 +1208,21 @@@ struct radeon_encoder_atom_dig *radeon_
  
                lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
  
+               if (ASIC_IS_AVIVO(rdev)) {
+                       if (radeon_new_pll == 0)
+                               lvds->pll_algo = PLL_ALGO_LEGACY;
+                       else
+                               lvds->pll_algo = PLL_ALGO_NEW;
+               } else {
+                       if (radeon_new_pll == 1)
+                               lvds->pll_algo = PLL_ALGO_NEW;
+                       else
+                               lvds->pll_algo = PLL_ALGO_LEGACY;
+               }
+               /* LVDS quirks */
+               radeon_atom_apply_lvds_quirks(dev, lvds);
                encoder->native_mode = lvds->native_mode;
        }
        return lvds;
@@@ -1385,20 -1429,375 +1447,375 @@@ radeon_atombios_get_tv_dac_info(struct 
        return tv_dac;
  }
  
- void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+ union power_info {
+       struct _ATOM_POWERPLAY_INFO info;
+       struct _ATOM_POWERPLAY_INFO_V2 info_2;
+       struct _ATOM_POWERPLAY_INFO_V3 info_3;
+       struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
+ };
+ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
  {
-       DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
-       int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+       u16 data_offset;
+       u8 frev, crev;
+       u32 misc, misc2 = 0, sclk, mclk;
+       union power_info *power_info;
+       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+       struct _ATOM_PPLIB_STATE *power_state;
+       int num_modes = 0, i, j;
+       int state_index = 0, mode_index = 0;
  
-       args.ucEnable = enable;
+       atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
  
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+       rdev->pm.default_power_state = NULL;
+       if (power_info) {
+               if (frev < 4) {
+                       num_modes = power_info->info.ucNumOfPowerModeEntries;
+                       if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+                               num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+                       for (i = 0; i < num_modes; i++) {
+                               rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+                               switch (frev) {
+                               case 1:
+                                       rdev->pm.power_state[state_index].num_clock_modes = 1;
+                                       rdev->pm.power_state[state_index].clock_info[0].mclk =
+                                               le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+                                       rdev->pm.power_state[state_index].clock_info[0].sclk =
+                                               le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+                                       /* skip invalid modes */
+                                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+                                           (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+                                               continue;
+                                       /* skip overclock modes for now */
+                                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+                                            rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+                                           (rdev->pm.power_state[state_index].clock_info[0].sclk >
+                                            rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+                                               continue;
+                                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+                                               power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+                                       misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+                                       if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+                                                       VOLTAGE_GPIO;
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+                                                       radeon_lookup_gpio(rdev,
+                                                       power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+                                               if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+                                                       rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+                                                               true;
+                                               else
+                                                       rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+                                                               false;
+                                       } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+                                                       VOLTAGE_VDDC;
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+                                                       power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+                                       }
+                                       /* order matters! */
+                                       if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_POWERSAVE;
+                                       if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BATTERY;
+                                       if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BATTERY;
+                                       if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BALANCED;
+                                       if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_PERFORMANCE;
+                                       if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_DEFAULT;
+                                               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+                                               rdev->pm.power_state[state_index].default_clock_mode =
+                                                       &rdev->pm.power_state[state_index].clock_info[0];
+                                       }
+                                       state_index++;
+                                       break;
+                               case 2:
+                                       rdev->pm.power_state[state_index].num_clock_modes = 1;
+                                       rdev->pm.power_state[state_index].clock_info[0].mclk =
+                                               le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+                                       rdev->pm.power_state[state_index].clock_info[0].sclk =
+                                               le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+                                       /* skip invalid modes */
+                                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+                                           (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+                                               continue;
+                                       /* skip overclock modes for now */
+                                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+                                            rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+                                           (rdev->pm.power_state[state_index].clock_info[0].sclk >
+                                            rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+                                               continue;
+                                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+                                               power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+                                       misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+                                       misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+                                       if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+                                                       VOLTAGE_GPIO;
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+                                                       radeon_lookup_gpio(rdev,
+                                                       power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+                                               if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+                                                       rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+                                                               true;
+                                               else
+                                                       rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+                                                               false;
+                                       } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+                                                       VOLTAGE_VDDC;
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+                                                       power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+                                       }
+                                       /* order matters! */
+                                       if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_POWERSAVE;
+                                       if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BATTERY;
+                                       if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BATTERY;
+                                       if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BALANCED;
+                                       if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_PERFORMANCE;
+                                       if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BALANCED;
+                                       if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_DEFAULT;
+                                               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+                                               rdev->pm.power_state[state_index].default_clock_mode =
+                                                       &rdev->pm.power_state[state_index].clock_info[0];
+                                       }
+                                       state_index++;
+                                       break;
+                               case 3:
+                                       rdev->pm.power_state[state_index].num_clock_modes = 1;
+                                       rdev->pm.power_state[state_index].clock_info[0].mclk =
+                                               le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+                                       rdev->pm.power_state[state_index].clock_info[0].sclk =
+                                               le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+                                       /* skip invalid modes */
+                                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+                                           (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+                                               continue;
+                                       /* skip overclock modes for now */
+                                       if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+                                            rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+                                           (rdev->pm.power_state[state_index].clock_info[0].sclk >
+                                            rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+                                               continue;
+                                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+                                               power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+                                       misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+                                       misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+                                       if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+                                                       VOLTAGE_GPIO;
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+                                                       radeon_lookup_gpio(rdev,
+                                                       power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+                                               if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+                                                       rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+                                                               true;
+                                               else
+                                                       rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+                                                               false;
+                                       } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+                                                       VOLTAGE_VDDC;
+                                               rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+                                                       power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+                                               if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+                                                       rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+                                                               true;
+                                                       rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+                                                       power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+                                               }
+                                       }
+                                       /* order matters! */
+                                       if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_POWERSAVE;
+                                       if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BATTERY;
+                                       if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BATTERY;
+                                       if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BALANCED;
+                                       if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_PERFORMANCE;
+                                       if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BALANCED;
+                                       if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_DEFAULT;
+                                               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+                                               rdev->pm.power_state[state_index].default_clock_mode =
+                                                       &rdev->pm.power_state[state_index].clock_info[0];
+                                       }
+                                       state_index++;
+                                       break;
+                               }
+                       }
+               } else if (frev == 4) {
+                       for (i = 0; i < power_info->info_4.ucNumStates; i++) {
+                               mode_index = 0;
+                               power_state = (struct _ATOM_PPLIB_STATE *)
+                                       (mode_info->atom_context->bios +
+                                        data_offset +
+                                        le16_to_cpu(power_info->info_4.usStateArrayOffset) +
+                                        i * power_info->info_4.ucStateEntrySize);
+                               non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+                                       (mode_info->atom_context->bios +
+                                        data_offset +
+                                        le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
+                                        (power_state->ucNonClockStateIndex *
+                                         power_info->info_4.ucNonClockSize));
+                               for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
+                                       if (rdev->flags & RADEON_IS_IGP) {
+                                               struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
+                                                       (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
+                                                       (mode_info->atom_context->bios +
+                                                        data_offset +
+                                                        le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+                                                        (power_state->ucClockStateIndices[j] *
+                                                         power_info->info_4.ucClockInfoSize));
+                                               sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
+                                               sclk |= clock_info->ucLowEngineClockHigh << 16;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+                                               /* skip invalid modes */
+                                               if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+                                                       continue;
+                                               /* skip overclock modes for now */
+                                               if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
+                                                   rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
+                                                       continue;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+                                                       VOLTAGE_SW;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+                                                       clock_info->usVDDC;
+                                               mode_index++;
+                                       } else {
+                                               struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
+                                                       (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
+                                                       (mode_info->atom_context->bios +
+                                                        data_offset +
+                                                        le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+                                                        (power_state->ucClockStateIndices[j] *
+                                                         power_info->info_4.ucClockInfoSize));
+                                               sclk = le16_to_cpu(clock_info->usEngineClockLow);
+                                               sclk |= clock_info->ucEngineClockHigh << 16;
+                                               mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+                                               mclk |= clock_info->ucMemoryClockHigh << 16;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+                                               /* skip invalid modes */
+                                               if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+                                                   (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+                                                       continue;
+                                               /* skip overclock modes for now */
+                                               if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
+                                                    rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+                                                   (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
+                                                    rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+                                                       continue;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+                                                       VOLTAGE_SW;
+                                               rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+                                                       clock_info->usVDDC;
+                                               mode_index++;
+                                       }
+                               }
+                               rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+                               if (mode_index) {
+                                       misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+                                       misc2 = le16_to_cpu(non_clock_info->usClassification);
+                                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+                                               ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+                                               ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+                                       switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+                                       case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BATTERY;
+                                               break;
+                                       case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_BALANCED;
+                                               break;
+                                       case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_PERFORMANCE;
+                                               break;
+                                       }
+                                       if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+                                               rdev->pm.power_state[state_index].type =
+                                                       POWER_STATE_TYPE_DEFAULT;
+                                               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+                                               rdev->pm.power_state[state_index].default_clock_mode =
+                                                       &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+                                       }
+                                       state_index++;
+                               }
+                       }
+               }
+       } else {
+               /* XXX figure out some good default low power mode for cards w/out power tables */
+       }
+       if (rdev->pm.default_power_state == NULL) {
+               /* add the default mode */
+               rdev->pm.power_state[state_index].type =
+                       POWER_STATE_TYPE_DEFAULT;
+               rdev->pm.power_state[state_index].num_clock_modes = 1;
+               rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+               rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+               rdev->pm.power_state[state_index].default_clock_mode =
+                       &rdev->pm.power_state[state_index].clock_info[0];
+               rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+               if (rdev->asic->get_pcie_lanes)
+                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
+               else
+                       rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
+               rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+               state_index++;
+       }
+       rdev->pm.num_power_states = state_index;
+       rdev->pm.current_power_state = rdev->pm.default_power_state;
+       rdev->pm.current_clock_mode =
+               rdev->pm.default_power_state->default_clock_mode;
  }
  
- void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
+ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
  {
-       ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args;
-       int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);
+       DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
  
        args.ucEnable = enable;
  
index 65f81942f3994684e829b79bec2a7a7b1d9422b1,6e9e7b59d67e677b2f949839d4704dbc8238ad78..ee0083f982d8ba2a926688e083ea930716e0de91
@@@ -479,10 -479,8 +479,8 @@@ static enum drm_connector_status radeon
                ret = connector_status_connected;
        else {
                if (radeon_connector->ddc_bus) {
-                       radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
                        radeon_connector->edid = drm_get_edid(&radeon_connector->base,
                                                              &radeon_connector->ddc_bus->adapter);
-                       radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
                        if (radeon_connector->edid)
                                ret = connector_status_connected;
                }
@@@ -580,26 -578,20 +578,21 @@@ static enum drm_connector_status radeon
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder;
        struct drm_encoder_helper_funcs *encoder_funcs;
 -      bool dret;
 +      bool dret = false;
        enum drm_connector_status ret = connector_status_disconnected;
  
        encoder = radeon_best_single_encoder(connector);
        if (!encoder)
                ret = connector_status_disconnected;
  
-       if (radeon_connector->ddc_bus) {
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 -      dret = radeon_ddc_probe(radeon_connector);
++      if (radeon_connector->ddc_bus)
 +              dret = radeon_ddc_probe(radeon_connector);
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
-       }
        if (dret) {
                if (radeon_connector->edid) {
                        kfree(radeon_connector->edid);
                        radeon_connector->edid = NULL;
                }
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
                radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
  
                if (!radeon_connector->edid) {
                        DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@@ -742,21 -734,15 +735,16 @@@ static enum drm_connector_status radeon
        struct drm_mode_object *obj;
        int i;
        enum drm_connector_status ret = connector_status_disconnected;
 -      bool dret;
 +      bool dret = false;
  
-       if (radeon_connector->ddc_bus) {
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 -      dret = radeon_ddc_probe(radeon_connector);
++      if (radeon_connector->ddc_bus)
 +              dret = radeon_ddc_probe(radeon_connector);
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
-       }
        if (dret) {
                if (radeon_connector->edid) {
                        kfree(radeon_connector->edid);
                        radeon_connector->edid = NULL;
                }
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
                radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
  
                if (!radeon_connector->edid) {
                        DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
                         * connected and the DVI port disconnected.  If the edid doesn't
                         * say HDMI, vice versa.
                         */
 -                      if (radeon_connector->shared_ddc && connector_status_connected) {
 +                      if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
                                struct drm_device *dev = connector->dev;
                                struct drm_connector *list_connector;
                                struct radeon_connector *list_radeon_connector;
@@@ -952,7 -938,7 +940,7 @@@ static void radeon_dp_connector_destroy
        if (radeon_connector->edid)
                kfree(radeon_connector->edid);
        if (radeon_dig_connector->dp_i2c_bus)
-               radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+               radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
        kfree(radeon_connector->con_priv);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
@@@ -988,12 -974,10 +976,10 @@@ static enum drm_connector_status radeon
                        ret = connector_status_connected;
                }
        } else {
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
                if (radeon_ddc_probe(radeon_connector)) {
                        radeon_dig_connector->dp_sink_type = sink_type;
                        ret = connector_status_connected;
                }
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
        }
  
        return ret;
@@@ -1060,7 -1044,8 +1046,7 @@@ radeon_add_atom_connector(struct drm_de
                        return;
                }
                if (radeon_connector->ddc_bus && i2c_bus->valid) {
 -                      if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
 -                                  sizeof(struct radeon_i2c_bus_rec)) == 0) {
 +                      if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
                                radeon_connector->shared_ddc = true;
                                shared_ddc = true;
                        }
index 6f4a5534a99e55c1b60abc307c92575a7d621909,7ecf5e1b39c1b1c697ec3ab658dd5f5ec525e5c3..b7023fff89eb258b00d6aee84e7632954fe00384
@@@ -36,7 -36,14 +36,14 @@@ static void radeon_lock_cursor(struct d
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        uint32_t cur_lock;
  
-       if (ASIC_IS_AVIVO(rdev)) {
+       if (ASIC_IS_DCE4(rdev)) {
+               cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
+               if (lock)
+                       cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+               else
+                       cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+               WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+       } else if (ASIC_IS_AVIVO(rdev)) {
                cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
                if (lock)
                        cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
@@@ -58,7 -65,10 +65,10 @@@ static void radeon_hide_cursor(struct d
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct radeon_device *rdev = crtc->dev->dev_private;
  
-       if (ASIC_IS_AVIVO(rdev)) {
+       if (ASIC_IS_DCE4(rdev)) {
+               WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+               WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+       } else if (ASIC_IS_AVIVO(rdev)) {
                WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
                WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
        } else {
@@@ -81,10 -91,14 +91,14 @@@ static void radeon_show_cursor(struct d
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct radeon_device *rdev = crtc->dev->dev_private;
  
-       if (ASIC_IS_AVIVO(rdev)) {
+       if (ASIC_IS_DCE4(rdev)) {
+               WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+               WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
+                      EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+       } else if (ASIC_IS_AVIVO(rdev)) {
                WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
                WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
-                            (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+                      (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
        } else {
                switch (radeon_crtc->crtc_id) {
                case 0:
@@@ -109,7 -123,10 +123,10 @@@ static void radeon_set_cursor(struct dr
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct radeon_device *rdev = crtc->dev->dev_private;
  
-       if (ASIC_IS_AVIVO(rdev)) {
+       if (ASIC_IS_DCE4(rdev)) {
+               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
+               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+       } else if (ASIC_IS_AVIVO(rdev)) {
                if (rdev->family >= CHIP_RV770) {
                        if (radeon_crtc->crtc_id)
                                WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
@@@ -169,13 -186,17 +186,13 @@@ int radeon_crtc_cursor_set(struct drm_c
  unpin:
        if (radeon_crtc->cursor_bo) {
                radeon_gem_object_unpin(radeon_crtc->cursor_bo);
 -              mutex_lock(&crtc->dev->struct_mutex);
 -              drm_gem_object_unreference(radeon_crtc->cursor_bo);
 -              mutex_unlock(&crtc->dev->struct_mutex);
 +              drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
        }
  
        radeon_crtc->cursor_bo = obj;
        return 0;
  fail:
 -      mutex_lock(&crtc->dev->struct_mutex);
 -      drm_gem_object_unreference(obj);
 -      mutex_unlock(&crtc->dev->struct_mutex);
 +      drm_gem_object_unreference_unlocked(obj);
  
        return 0;
  }
@@@ -197,7 -218,20 +214,20 @@@ int radeon_crtc_cursor_move(struct drm_
                yorigin = CURSOR_HEIGHT - 1;
  
        radeon_lock_cursor(crtc, true);
-       if (ASIC_IS_AVIVO(rdev)) {
+       if (ASIC_IS_DCE4(rdev)) {
+               /* cursors are offset into the total surface */
+               x += crtc->x;
+               y += crtc->y;
+               DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+               /* XXX: check if evergreen has the same issues as avivo chips */
+               WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
+                      ((xorigin ? 0 : x) << 16) |
+                      (yorigin ? 0 : y));
+               WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+               WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+                      ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
+       } else if (ASIC_IS_AVIVO(rdev)) {
                int w = radeon_crtc->cursor_width;
                int i = 0;
                struct drm_crtc *crtc_p;
index 3db82550562652b2e5a4312fb2b70837f7dbca76,e35cc3da8f225f873e3d1e5072c6b72d530f8d6b..ba8d806dcf3939fe91285820793e7b733ffb0541
@@@ -68,6 -68,36 +68,36 @@@ static void avivo_crtc_load_lut(struct 
        WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
  }
  
+ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
+ {
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       int i;
+       DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
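+       /* reset LUT black/white offsets, select this crtc's LUT, then load 256 10-bit-per-component entries */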
+       WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+       WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+       WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+       WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
+       WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
+       WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
+       for (i = 0; i < 256; i++) {
+               WREG32(EVERGREEN_DC_LUT_30_COLOR,
+                      (radeon_crtc->lut_r[i] << 20) |
+                      (radeon_crtc->lut_g[i] << 10) |
+                      (radeon_crtc->lut_b[i] << 0));
+       }
+ }
  static void legacy_crtc_load_lut(struct drm_crtc *crtc)
  {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@@ -100,7 -130,9 +130,9 @@@ void radeon_crtc_load_lut(struct drm_cr
        if (!crtc->enabled)
                return;
  
-       if (ASIC_IS_AVIVO(rdev))
+       if (ASIC_IS_DCE4(rdev))
+               evergreen_crtc_load_lut(crtc);
+       else if (ASIC_IS_AVIVO(rdev))
                avivo_crtc_load_lut(crtc);
        else
                legacy_crtc_load_lut(crtc);
@@@ -278,7 -310,7 +310,7 @@@ static void radeon_print_display_setup(
                DRM_INFO("  %s\n", connector_names[connector->connector_type]);
                if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
                        DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
 -              if (radeon_connector->ddc_bus)
 +              if (radeon_connector->ddc_bus) {
                        DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                                 radeon_connector->ddc_bus->rec.mask_clk_reg,
                                 radeon_connector->ddc_bus->rec.mask_data_reg,
                                 radeon_connector->ddc_bus->rec.en_data_reg,
                                 radeon_connector->ddc_bus->rec.y_clk_reg,
                                 radeon_connector->ddc_bus->rec.y_data_reg);
 +              } else {
 +                      if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
 +                          connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
 +                          connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
 +                          connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
 +                          connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
 +                          connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
 +                              DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
 +              }
                DRM_INFO("  Encoders:\n");
                list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                        radeon_encoder = to_radeon_encoder(encoder);
@@@ -361,6 -384,8 +393,8 @@@ static bool radeon_setup_enc_conn(struc
  
  int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
  {
+       struct drm_device *dev = radeon_connector->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
        int ret = 0;
  
        if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
        if (!radeon_connector->ddc_bus)
                return -1;
        if (!radeon_connector->edid) {
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
                radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-               radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
        }
+       /* some servers provide a hardcoded edid in rom for KVMs */
+       if (!radeon_connector->edid)
+               radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
        if (radeon_connector->edid) {
                drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
                ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@@ -395,9 -420,7 +429,7 @@@ static int radeon_ddc_dump(struct drm_c
  
        if (!radeon_connector->ddc_bus)
                return -1;
-       radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
        edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
-       radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
        if (edid) {
                kfree(edid);
        }
@@@ -414,13 -437,13 +446,13 @@@ static inline uint32_t radeon_div(uint6
        return n;
  }
  
- void radeon_compute_pll(struct radeon_pll *pll,
-                       uint64_t freq,
-                       uint32_t *dot_clock_p,
-                       uint32_t *fb_div_p,
-                       uint32_t *frac_fb_div_p,
-                       uint32_t *ref_div_p,
-                       uint32_t *post_div_p)
+ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
+                                     uint64_t freq,
+                                     uint32_t *dot_clock_p,
+                                     uint32_t *fb_div_p,
+                                     uint32_t *frac_fb_div_p,
+                                     uint32_t *ref_div_p,
+                                     uint32_t *post_div_p)
  {
        uint32_t min_ref_div = pll->min_ref_div;
        uint32_t max_ref_div = pll->max_ref_div;
        *post_div_p = best_post_div;
  }
  
- void radeon_compute_pll_avivo(struct radeon_pll *pll,
-                             uint64_t freq,
-                             uint32_t *dot_clock_p,
-                             uint32_t *fb_div_p,
-                             uint32_t *frac_fb_div_p,
-                             uint32_t *ref_div_p,
-                             uint32_t *post_div_p)
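+ /* compute the integer/fractional feedback divider for vco = freq * post_div at the given ref_div */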
+ static bool
+ calc_fb_div(struct radeon_pll *pll,
+           uint32_t freq,
+             uint32_t post_div,
+             uint32_t ref_div,
+             uint32_t *fb_div,
+             uint32_t *fb_div_frac)
  {
-       fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
-       fixed20_12 pll_out_max, pll_out_min;
-       fixed20_12 pll_in_max, pll_in_min;
-       fixed20_12 reference_freq;
-       fixed20_12 error, ffreq, a, b;
-       pll_out_max.full = rfixed_const(pll->pll_out_max);
-       pll_out_min.full = rfixed_const(pll->pll_out_min);
-       pll_in_max.full = rfixed_const(pll->pll_in_max);
-       pll_in_min.full = rfixed_const(pll->pll_in_min);
-       reference_freq.full = rfixed_const(pll->reference_freq);
-       do_div(freq, 10);
+       fixed20_12 feedback_divider, a, b;
+       u32 vco_freq;
+       vco_freq = freq * post_div;
+       /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
+       a.full = rfixed_const(pll->reference_freq);
+       feedback_divider.full = rfixed_const(vco_freq);
+       feedback_divider.full = rfixed_div(feedback_divider, a);
+       a.full = rfixed_const(ref_div);
+       feedback_divider.full = rfixed_mul(feedback_divider, a);
+       if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+               /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
+               a.full = rfixed_const(10);
+               feedback_divider.full = rfixed_mul(feedback_divider, a);
+               feedback_divider.full += rfixed_const_half(0);
+               feedback_divider.full = rfixed_floor(feedback_divider);
+               feedback_divider.full = rfixed_div(feedback_divider, a);
+               /* *fb_div = floor(feedback_divider); */
+               a.full = rfixed_floor(feedback_divider);
+               *fb_div = rfixed_trunc(a);
+               /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
+               a.full = rfixed_const(10);
+               b.full = rfixed_mul(feedback_divider, a);
+               feedback_divider.full = rfixed_floor(feedback_divider);
+               feedback_divider.full = rfixed_mul(feedback_divider, a);
+               feedback_divider.full = b.full - feedback_divider.full;
+               *fb_div_frac = rfixed_trunc(feedback_divider);
+       } else {
+               /* *fb_div = floor(feedback_divider + 0.5); */
+               feedback_divider.full += rfixed_const_half(0);
+               feedback_divider.full = rfixed_floor(feedback_divider);
+               *fb_div = rfixed_trunc(feedback_divider);
+               *fb_div_frac = 0;
+       }
+       if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
+               return false;
+       else
+               return true;
+ }
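+ /* scan the ref_div range for a feedback divider whose output lands within 0.25% of the requested freq */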
+ static bool
+ calc_fb_ref_div(struct radeon_pll *pll,
+               uint32_t freq,
+               uint32_t post_div,
+               uint32_t *fb_div,
+                 uint32_t *fb_div_frac,
+                 uint32_t *ref_div)
+ {
+       fixed20_12 ffreq, max_error, error, pll_out, a;
+       u32 vco;
        ffreq.full = rfixed_const(freq);
-       error.full = rfixed_const(100 * 100);
+       /* max_error = ffreq * 0.0025; */
+       a.full = rfixed_const(400);
+       max_error.full = rfixed_div(ffreq, a);
  
-       /* max p */
-       p.full = rfixed_div(pll_out_max, ffreq);
-       p.full = rfixed_floor(p);
+       for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
+               if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
+                       vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
+                       vco = vco / ((*ref_div) * 10);
  
-       /* min m */
-       m.full = rfixed_div(reference_freq, pll_in_max);
-       m.full = rfixed_ceil(m);
+                       if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
+                               continue;
  
-       while (1) {
-               n.full = rfixed_div(ffreq, reference_freq);
-               n.full = rfixed_mul(n, m);
-               n.full = rfixed_mul(n, p);
+                       /* pll_out = vco / post_div; */
+                       a.full = rfixed_const(post_div);
+                       pll_out.full = rfixed_const(vco);
+                       pll_out.full = rfixed_div(pll_out, a);
  
-               f_vco.full = rfixed_div(n, m);
-               f_vco.full = rfixed_mul(f_vco, reference_freq);
+                       if (pll_out.full >= ffreq.full) {
+                               error.full = pll_out.full - ffreq.full;
+                               if (error.full <= max_error.full)
+                                       return true;
+                       }
+               }
+       }
+       return false;
+ }
  
-               f_pclk.full = rfixed_div(f_vco, p);
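+ /* new pll algorithm: honor fixed post/ref dividers when requested, otherwise walk post dividers from largest to smallest and derive ref/feedback dividers */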
+ static void radeon_compute_pll_new(struct radeon_pll *pll,
+                                  uint64_t freq,
+                                  uint32_t *dot_clock_p,
+                                  uint32_t *fb_div_p,
+                                  uint32_t *frac_fb_div_p,
+                                  uint32_t *ref_div_p,
+                                  uint32_t *post_div_p)
+ {
+       u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
+       u32 best_freq = 0, vco_frequency;
  
-               if (f_pclk.full > ffreq.full)
-                       error.full = f_pclk.full - ffreq.full;
-               else
-                       error.full = ffreq.full - f_pclk.full;
-               error.full = rfixed_div(error, f_pclk);
-               a.full = rfixed_const(100 * 100);
-               error.full = rfixed_mul(error, a);
-               a.full = rfixed_mul(m, p);
-               a.full = rfixed_div(n, a);
-               best_freq.full = rfixed_mul(reference_freq, a);
-               if (rfixed_trunc(error) < 25)
-                       break;
-               a.full = rfixed_const(1);
-               m.full = m.full + a.full;
-               a.full = rfixed_div(reference_freq, m);
-               if (a.full >= pll_in_min.full)
-                       continue;
+       /* freq = freq / 10; */
+       do_div(freq, 10);
  
-               m.full = rfixed_div(reference_freq, pll_in_max);
-               m.full = rfixed_ceil(m);
-               a.full= rfixed_const(1);
-               p.full = p.full - a.full;
-               a.full = rfixed_mul(p, ffreq);
-               if (a.full >= pll_out_min.full)
-                       continue;
-               else {
-                       DRM_ERROR("Unable to find pll dividers\n");
-                       break;
+       if (pll->flags & RADEON_PLL_USE_POST_DIV) {
+               post_div = pll->post_div;
+               if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
+                       goto done;
+               vco_frequency = freq * post_div;
+               if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+                       goto done;
+               if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+                       ref_div = pll->reference_div;
+                       if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+                               goto done;
+                       if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+                               goto done;
+               }
+       } else {
+               for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
+                       if (pll->flags & RADEON_PLL_LEGACY) {
+                               if ((post_div == 5) ||
+                                   (post_div == 7) ||
+                                   (post_div == 9) ||
+                                   (post_div == 10) ||
+                                   (post_div == 11))
+                                       continue;
+                       }
+                       if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+                               continue;
+                       vco_frequency = freq * post_div;
+                       if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+                               continue;
+                       if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+                               ref_div = pll->reference_div;
+                               if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+                                       goto done;
+                               if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+                                       break;
+                       } else {
+                               if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
+                                       break;
+                       }
                }
        }
  
-       a.full = rfixed_const(10);
-       b.full = rfixed_mul(n, a);
+       best_freq = pll->reference_freq * 10 * fb_div;
+       best_freq += pll->reference_freq * fb_div_frac;
+       best_freq = best_freq / (ref_div * post_div);
  
-       frac_n.full = rfixed_floor(n);
-       frac_n.full = rfixed_mul(frac_n, a);
-       frac_n.full = b.full - frac_n.full;
+ done:
+       if (best_freq == 0)
+               DRM_ERROR("Couldn't find valid PLL dividers\n");
  
-       *dot_clock_p = rfixed_trunc(best_freq);
-       *fb_div_p = rfixed_trunc(n);
-       *frac_fb_div_p = rfixed_trunc(frac_n);
-       *ref_div_p = rfixed_trunc(m);
-       *post_div_p = rfixed_trunc(p);
+       *dot_clock_p = best_freq / 10;
+       *fb_div_p = fb_div;
+       *frac_fb_div_p = fb_div_frac;
+       *ref_div_p = ref_div;
+       *post_div_p = post_div;
  
-       DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+       DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+ }
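+ /* dispatch to whichever divider algorithm is selected for this pll */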
+ void radeon_compute_pll(struct radeon_pll *pll,
+                       uint64_t freq,
+                       uint32_t *dot_clock_p,
+                       uint32_t *fb_div_p,
+                       uint32_t *frac_fb_div_p,
+                       uint32_t *ref_div_p,
+                       uint32_t *post_div_p)
+ {
+       switch (pll->algo) {
+       case PLL_ALGO_NEW:
+               radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
+                                      frac_fb_div_p, ref_div_p, post_div_p);
+               break;
+       case PLL_ALGO_LEGACY:
+       default:
+               radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
+                                         frac_fb_div_p, ref_div_p, post_div_p);
+               break;
+       }
  }
  
  static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
        if (fb->fbdev)
                radeonfb_remove(dev, fb);
  
 -      if (radeon_fb->obj) {
 -              mutex_lock(&dev->struct_mutex);
 -              drm_gem_object_unreference(radeon_fb->obj);
 -              mutex_unlock(&dev->struct_mutex);
 -      }
 +      if (radeon_fb->obj)
 +              drm_gem_object_unreference_unlocked(radeon_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(radeon_fb);
  }
@@@ -816,7 -941,7 +947,7 @@@ static int radeon_modeset_create_props(
  
  int radeon_modeset_init(struct radeon_device *rdev)
  {
-       int num_crtc = 2, i;
+       int i;
        int ret;
  
        drm_mode_config_init(rdev->ddev);
                return ret;
        }
  
+       /* check combios for a valid hardcoded EDID - Sun servers */
+       if (!rdev->is_atom_bios) {
+               /* check for hardcoded EDID in BIOS */
+               radeon_combios_check_hardcoded_edid(rdev);
+       }
        if (rdev->flags & RADEON_SINGLE_CRTC)
-               num_crtc = 1;
+               rdev->num_crtc = 1;
+       else {
+               if (ASIC_IS_DCE4(rdev))
+                       rdev->num_crtc = 6;
+               else
+                       rdev->num_crtc = 2;
+       }
  
        /* allocate crtcs */
-       for (i = 0; i < num_crtc; i++) {
+       for (i = 0; i < rdev->num_crtc; i++) {
                radeon_crtc_init(rdev->ddev, i);
        }
  
  
  void radeon_modeset_fini(struct radeon_device *rdev)
  {
+       kfree(rdev->mode_info.bios_hardcoded_edid);
        if (rdev->mode_info.mode_config_initialized) {
                radeon_hpd_fini(rdev);
                drm_mode_config_cleanup(rdev->ddev);
index c57ad606504dfcb3cee7b66103aac1ab834daec9,f6d20cee5705670eed083ffd24c5e2466c06e1b3..88f4d8669d845fa32ee8253cf852af246c168283
   * 1.29- R500 3D cmd buffer support
   * 1.30- Add support for occlusion queries
   * 1.31- Add support for num Z pipes from GET_PARAM
 + * 1.32- fixes for rv740 setup
   */
  #define DRIVER_MAJOR          1
 -#define DRIVER_MINOR          31
 +#define DRIVER_MINOR          32
  #define DRIVER_PATCHLEVEL     0
  
  enum radeon_cp_microcode_version {
@@@ -295,6 -294,9 +295,9 @@@ typedef struct drm_radeon_private 
        int r700_sc_prim_fifo_size;
        int r700_sc_hiz_tile_fifo_size;
        int r700_sc_earlyz_tile_fifo_fize;
+       int r600_group_size;
+       int r600_npipes;
+       int r600_nbanks;
  
        struct mutex cs_mutex;
        u32 cs_id_scnt;
@@@ -310,9 -312,11 +313,11 @@@ typedef struct drm_radeon_buf_priv 
        u32 age;
  } drm_radeon_buf_priv_t;
  
+ struct drm_buffer;
  typedef struct drm_radeon_kcmd_buffer {
        int bufsz;
-       char *buf;
+       struct drm_buffer *buffer;
        int nbox;
        struct drm_clip_rect __user *boxes;
  } drm_radeon_kcmd_buffer_t;
@@@ -2122,4 -2126,32 +2127,32 @@@ extern void radeon_commit_ring(drm_rade
        write &= mask;                                          \
  } while (0)
  
+ /**
+  * Copy the given number of dwords from the drm buffer to the ring buffer,
+  * splitting the copy at ring wrap-around and drm buffer page boundaries.
+  */
+ #define OUT_RING_DRM_BUFFER(buf, sz) do {                             \
+       int _size = (sz) * 4;                                           \
+       struct drm_buffer *_buf = (buf);                                \
+       int _part_size;                                                 \
+       while (_size > 0) {                                             \
+               _part_size = _size;                                     \
+                                                                       \
+               if (write + _part_size/4 > mask)                        \
+                       _part_size = ((mask + 1) - write)*4;            \
+                                                                       \
+               if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE)    \
+                       _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
+                                                                       \
+               /* copy this chunk from the current drm buffer page */  \
+               memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
+                       [drm_buffer_index(_buf)], _part_size);          \
+                                                                       \
+               _size -= _part_size;                                    \
+               write = (write + _part_size/4) & mask;                  \
+               drm_buffer_advance(_buf, _part_size);                   \
+       }                                                               \
+ } while (0)
  #endif                                /* __RADEON_DRV_H__ */
index d71e346e9ab5a9faa96d62a96ad35cb84cfce20d,c39ddda138403e430770467c1ece97f134720ebd..cda112cc7a6c3de59561573932636b6b2396c07e
@@@ -59,7 -59,7 +59,7 @@@ static struct fb_ops radeonfb_ops = 
  };
  
  /**
 - * Curretly it is assumed that the old framebuffer is reused.
 + * Currently it is assumed that the old framebuffer is reused.
   *
   * LOCKING
   * caller should hold the mode config lock.
@@@ -148,7 -148,6 +148,6 @@@ int radeonfb_create(struct drm_device *
        unsigned long tmp;
        bool fb_tiled = false; /* useful for testing */
        u32 tiling_flags = 0;
-       int crtc_count;
  
        mode_cmd.width = surface_width;
        mode_cmd.height = surface_height;
        rfbdev = info->par;
        rfbdev->helper.funcs = &radeon_fb_helper_funcs;
        rfbdev->helper.dev = dev;
-       if (rdev->flags & RADEON_SINGLE_CRTC)
-               crtc_count = 1;
-       else
-               crtc_count = 2;
-       ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
+       ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
                                            RADEONFB_CONN_LIMIT);
        if (ret)
                goto out_unref;
  
 -      memset_io(fbptr, 0xff, aligned_size);
 +      memset_io(fbptr, 0x0, aligned_size);
  
        strcpy(info->fix.id, "radeondrmfb");
  
        info->flags = FBINFO_DEFAULT;
        info->fbops = &radeonfb_ops;
  
-       tmp = fb_gpuaddr - rdev->mc.vram_location;
+       tmp = fb_gpuaddr - rdev->mc.vram_start;
        info->fix.smem_start = rdev->mc.aper_base + tmp;
        info->fix.smem_len = size;
        info->screen_base = fbptr;
index 6579eb4c1f287007a3f22179c4593db621d12228,38fa144293201573e389c521d05f6506c13a9387..e50513a627351983edbea71e2d369e959434e069
  
  int radeon_debugfs_ib_init(struct radeon_device *rdev);
  
+ void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
+ {
+       struct radeon_ib *ib, *n;
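+       /* free every IB recorded on the bogus list */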
+       list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
+               list_del(&ib->list);
+               vfree(ib->ptr);
+               kfree(ib);
+       }
+ }
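+ /* keep a copy of a suspect IB so it can be dumped later through debugfs */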
+ void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+ {
+       struct radeon_ib *bib;
+       bib = kmalloc(sizeof(*bib), GFP_KERNEL);
+       if (bib == NULL)
+               return;
+       bib->ptr = vmalloc(ib->length_dw * 4);
+       if (bib->ptr == NULL) {
+               kfree(bib);
+               return;
+       }
+       memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
+       bib->length_dw = ib->length_dw;
+       mutex_lock(&rdev->ib_pool.mutex);
+       list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
+       mutex_unlock(&rdev->ib_pool.mutex);
+ }
  /*
   * IB.
   */
@@@ -100,8 -130,6 +130,8 @@@ void radeon_ib_free(struct radeon_devic
        if (tmp == NULL) {
                return;
        }
 +      if (!tmp->fence->emited)
 +              radeon_fence_unref(&tmp->fence);
        mutex_lock(&rdev->ib_pool.mutex);
        tmp->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
@@@ -142,6 -170,7 +172,7 @@@ int radeon_ib_pool_init(struct radeon_d
  
        if (rdev->ib_pool.robj)
                return 0;
+       INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
        /* Allocate 1M object buffer */
        r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
                                true, RADEON_GEM_DOMAIN_GTT,
@@@ -192,6 -221,7 +223,8 @@@ void radeon_ib_pool_fini(struct radeon_
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
+       radeon_ib_bogus_cleanup(rdev);
++
        if (rdev->ib_pool.robj) {
                r = radeon_bo_reserve(rdev->ib_pool.robj, false);
                if (likely(r == 0)) {
@@@ -349,15 -379,49 +382,49 @@@ static int radeon_debugfs_ib_info(struc
        return 0;
  }
  
+ static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+ {
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct radeon_device *rdev = node->info_ent->data;
+       struct radeon_ib *ib;
+       unsigned i;
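+       /* pop the oldest recorded bogus IB off the list and dump its dwords */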
+       mutex_lock(&rdev->ib_pool.mutex);
+       if (list_empty(&rdev->ib_pool.bogus_ib)) {
+               mutex_unlock(&rdev->ib_pool.mutex);
+               seq_printf(m, "no bogus IB recorded\n");
+               return 0;
+       }
+       ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
+       list_del_init(&ib->list);
+       mutex_unlock(&rdev->ib_pool.mutex);
+       seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+       for (i = 0; i < ib->length_dw; i++) {
+               seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+       }
+       vfree(ib->ptr);
+       kfree(ib);
+       return 0;
+ }
  static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
  static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+ static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
+       {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
+ };
  #endif
  
  int radeon_debugfs_ib_init(struct radeon_device *rdev)
  {
  #if defined(CONFIG_DEBUG_FS)
        unsigned i;
+       int r;
  
+       radeon_debugfs_ib_bogus_info_list[0].data = rdev;
+       r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
+       if (r)
+               return r;
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
                radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
index 067167cb39cafa8444b2358cde28d5bc9e96f167,44b6d66b0ab31a6d67a4c3bd5a5ed44a50767529..32971b8272cf9fb61c3ea53a9f5a16b4113bc7b9
@@@ -29,6 -29,7 +29,7 @@@
  
  #include "drmP.h"
  #include "drm.h"
+ #include "drm_buffer.h"
  #include "drm_sarea.h"
  #include "radeon_drm.h"
  #include "radeon_drv.h"
@@@ -91,21 -92,26 +92,26 @@@ static __inline__ int radeon_check_and_
  static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
                                                     dev_priv,
                                                     struct drm_file *file_priv,
-                                                    int id, u32 *data)
+                                                    int id, struct drm_buffer *buf)
  {
+       u32 *data;
        switch (id) {
  
        case RADEON_EMIT_PP_MISC:
-               if (radeon_check_and_fixup_offset(dev_priv, file_priv,
-                   &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
+               data = drm_buffer_pointer_to_dword(buf,
+                       (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
+               if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
                        DRM_ERROR("Invalid depth buffer offset\n");
                        return -EINVAL;
                }
                break;
  
        case RADEON_EMIT_PP_CNTL:
-               if (radeon_check_and_fixup_offset(dev_priv, file_priv,
-                   &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
+               data = drm_buffer_pointer_to_dword(buf,
+                       (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
+               if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
                        DRM_ERROR("Invalid colour buffer offset\n");
                        return -EINVAL;
                }
        case R200_EMIT_PP_TXOFFSET_3:
        case R200_EMIT_PP_TXOFFSET_4:
        case R200_EMIT_PP_TXOFFSET_5:
-               if (radeon_check_and_fixup_offset(dev_priv, file_priv,
-                                                 &data[0])) {
+               data = drm_buffer_pointer_to_dword(buf, 0);
+               if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
                        DRM_ERROR("Invalid R200 texture offset\n");
                        return -EINVAL;
                }
        case RADEON_EMIT_PP_TXFILTER_0:
        case RADEON_EMIT_PP_TXFILTER_1:
        case RADEON_EMIT_PP_TXFILTER_2:
-               if (radeon_check_and_fixup_offset(dev_priv, file_priv,
-                   &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
+               data = drm_buffer_pointer_to_dword(buf,
+                       (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
+               if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
                        DRM_ERROR("Invalid R100 texture offset\n");
                        return -EINVAL;
                }
        case R200_EMIT_PP_CUBIC_OFFSETS_5:{
                        int i;
                        for (i = 0; i < 5; i++) {
+                               data = drm_buffer_pointer_to_dword(buf, i);
                                if (radeon_check_and_fixup_offset(dev_priv,
                                                                  file_priv,
-                                                                 &data[i])) {
+                                                                 data)) {
                                        DRM_ERROR
                                            ("Invalid R200 cubic texture offset\n");
                                        return -EINVAL;
        case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
                        int i;
                        for (i = 0; i < 5; i++) {
+                               data = drm_buffer_pointer_to_dword(buf, i);
                                if (radeon_check_and_fixup_offset(dev_priv,
                                                                  file_priv,
-                                                                 &data[i])) {
+                                                                 data)) {
                                        DRM_ERROR
                                            ("Invalid R100 cubic texture offset\n");
                                        return -EINVAL;
@@@ -269,23 -278,24 +278,24 @@@ static __inline__ int radeon_check_and_
                                                     cmdbuf,
                                                     unsigned int *cmdsz)
  {
-       u32 *cmd = (u32 *) cmdbuf->buf;
+       u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
        u32 offset, narrays;
        int count, i, k;
  
-       *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+       count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+       *cmdsz = 2 + count;
  
-       if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
+       if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
                DRM_ERROR("Not a type 3 packet\n");
                return -EINVAL;
        }
  
-       if (4 * *cmdsz > cmdbuf->bufsz) {
+       if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
                DRM_ERROR("Packet size larger than size of data provided\n");
                return -EINVAL;
        }
  
-       switch(cmd[0] & 0xff00) {
+       switch (*cmd & 0xff00) {
        /* XXX Are there old drivers needing other packets? */
  
        case RADEON_3D_DRAW_IMMD:
                break;
  
        case RADEON_3D_LOAD_VBPNTR:
-               count = (cmd[0] >> 16) & 0x3fff;
  
                if (count > 18) { /* 12 arrays max */
                        DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
                }
  
                /* carefully check packet contents */
-               narrays = cmd[1] & ~0xc000;
+               cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+               narrays = *cmd & ~0xc000;
                k = 0;
                i = 2;
                while ((k < narrays) && (i < (count + 2))) {
                        i++;            /* skip attribute field */
+                       cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
                        if (radeon_check_and_fixup_offset(dev_priv, file_priv,
-                                                         &cmd[i])) {
+                                                         cmd)) {
                                DRM_ERROR
                                    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
                                     k, i);
                        if (k == narrays)
                                break;
                        /* have one more to process, they come in pairs */
+                       cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
                        if (radeon_check_and_fixup_offset(dev_priv,
-                                                         file_priv, &cmd[i]))
+                                                         file_priv, cmd))
                        {
                                DRM_ERROR
                                    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
                        DRM_ERROR("Invalid 3d packet for r200-class chip\n");
                        return -EINVAL;
                }
-               if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
+               cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+               if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
                                DRM_ERROR("Invalid rndr_gen_indx offset\n");
                                return -EINVAL;
                }
                        DRM_ERROR("Invalid 3d packet for r100-class chip\n");
                        return -EINVAL;
                }
-               if ((cmd[1] & 0x8000ffff) != 0x80000810) {
-                       DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+               cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+               if ((*cmd & 0x8000ffff) != 0x80000810) {
+                       DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
                        return -EINVAL;
                }
-               if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
-                       DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+               cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+               if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+                       DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
                        return -EINVAL;
                }
                break;
        case RADEON_CNTL_PAINT_MULTI:
        case RADEON_CNTL_BITBLT_MULTI:
                /* MSB of opcode: next DWORD GUI_CNTL */
-               if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+               cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+               if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
                              | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
-                       offset = cmd[2] << 10;
+                       u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+                       offset = *cmd2 << 10;
                        if (radeon_check_and_fixup_offset
                            (dev_priv, file_priv, &offset)) {
                                DRM_ERROR("Invalid first packet offset\n");
                                return -EINVAL;
                        }
-                       cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
+                       *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
                }
  
-               if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
-                   (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
-                       offset = cmd[3] << 10;
+               if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+                   (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+                       u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+                       offset = *cmd3 << 10;
                        if (radeon_check_and_fixup_offset
                            (dev_priv, file_priv, &offset)) {
                                DRM_ERROR("Invalid second packet offset\n");
                                return -EINVAL;
                        }
-                       cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
+                       *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
                }
                break;
  
        default:
-               DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
+               DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
                return -EINVAL;
        }
  
@@@ -1950,7 -1972,7 +1972,7 @@@ static void radeon_apply_surface_regs(i
   * Note that refcount can be at most 2, since during a free refcount=3
   * might mean we have to allocate a new surface which might not always
   * be available.
 - * For example : we allocate three contigous surfaces ABC. If B is
 + * For example : we allocate three contiguous surfaces ABC. If B is
   * freed, we suddenly need two surfaces to store A and C, which might
   * not always be available.
   */
@@@ -2611,7 -2633,6 +2633,6 @@@ static int radeon_emit_packets(drm_rade
  {
        int id = (int)header.packet.packet_id;
        int sz, reg;
-       int *data = (int *)cmdbuf->buf;
        RING_LOCALS;
  
        if (id >= RADEON_MAX_STATE_PACKETS)
        sz = packet[id].len;
        reg = packet[id].start;
  
-       if (sz * sizeof(int) > cmdbuf->bufsz) {
+       if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
                DRM_ERROR("Packet size provided larger than data provided\n");
                return -EINVAL;
        }
  
-       if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
+       if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
+                               cmdbuf->buffer)) {
                DRM_ERROR("Packet verification failed\n");
                return -EINVAL;
        }
  
        BEGIN_RING(sz + 1);
        OUT_RING(CP_PACKET0(reg, (sz - 1)));
-       OUT_RING_TABLE(data, sz);
+       OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
  
-       cmdbuf->buf += sz * sizeof(int);
-       cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2653,10 -2673,8 +2673,8 @@@ static __inline__ int radeon_emit_scala
        OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
        OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
        OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
-       OUT_RING_TABLE(cmdbuf->buf, sz);
+       OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
-       cmdbuf->buf += sz * sizeof(int);
-       cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2675,10 -2693,8 +2693,8 @@@ static __inline__ int radeon_emit_scala
        OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
        OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
        OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
-       OUT_RING_TABLE(cmdbuf->buf, sz);
+       OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
-       cmdbuf->buf += sz * sizeof(int);
-       cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2696,11 -2712,9 +2712,9 @@@ static __inline__ int radeon_emit_vecto
        OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
        OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
        OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
-       OUT_RING_TABLE(cmdbuf->buf, sz);
+       OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
  
-       cmdbuf->buf += sz * sizeof(int);
-       cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2714,7 -2728,7 +2728,7 @@@ static __inline__ int radeon_emit_vecli
  
          if (!sz)
                  return 0;
-         if (sz * 4 > cmdbuf->bufsz)
+       if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
                  return -EINVAL;
  
        BEGIN_RING(5 + sz);
        OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
        OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
        OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
-       OUT_RING_TABLE(cmdbuf->buf, sz);
+       OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
        ADVANCE_RING();
  
-       cmdbuf->buf += sz * sizeof(int);
-       cmdbuf->bufsz -= sz * sizeof(int);
        return 0;
  }
  
@@@ -2748,11 -2760,9 +2760,9 @@@ static int radeon_emit_packet3(struct d
        }
  
        BEGIN_RING(cmdsz);
-       OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+       OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
        ADVANCE_RING();
  
-       cmdbuf->buf += cmdsz * 4;
-       cmdbuf->bufsz -= cmdsz * 4;
        return 0;
  }
  
@@@ -2805,16 -2815,16 +2815,16 @@@ static int radeon_emit_packet3_cliprect
                }
  
                BEGIN_RING(cmdsz);
-               OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+               OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
                ADVANCE_RING();
  
        } while (++i < cmdbuf->nbox);
        if (cmdbuf->nbox == 1)
                cmdbuf->nbox = 0;
  
+       return 0;
        out:
-       cmdbuf->buf += cmdsz * 4;
-       cmdbuf->bufsz -= cmdsz * 4;
+       drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
        return 0;
  }
  
@@@ -2847,16 -2857,16 +2857,16 @@@ static int radeon_emit_wait(struct drm_
        return 0;
  }
  
- static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
+               struct drm_file *file_priv)
  {
        drm_radeon_private_t *dev_priv = dev->dev_private;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf = NULL;
+       drm_radeon_cmd_header_t stack_header;
        int idx;
        drm_radeon_kcmd_buffer_t *cmdbuf = data;
-       drm_radeon_cmd_header_t header;
-       int orig_nbox, orig_bufsz;
-       char *kbuf = NULL;
+       int orig_nbox;
  
        LOCK_TEST_WITH_RETURN(dev, file_priv);
  
         * races between checking values and using those values in other code,
         * and simply to avoid a lot of function calls to copy in data.
         */
-       orig_bufsz = cmdbuf->bufsz;
-       if (orig_bufsz != 0) {
-               kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL);
-               if (kbuf == NULL)
-                       return -ENOMEM;
-               if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
-                                      cmdbuf->bufsz)) {
-                       kfree(kbuf);
-                       return -EFAULT;
-               }
-               cmdbuf->buf = kbuf;
+       if (cmdbuf->bufsz != 0) {
+               int rv;
+               void __user *buffer = cmdbuf->buffer;
+               rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
+               if (rv)
+                       return rv;
+               rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
+                                               cmdbuf->bufsz);
+               if (rv) {
+                       drm_buffer_free(cmdbuf->buffer);
+                       return rv;
+               }
        }
  
        orig_nbox = cmdbuf->nbox;
                int temp;
                temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
  
-               if (orig_bufsz != 0)
-                       kfree(kbuf);
+               if (cmdbuf->bufsz != 0)
+                       drm_buffer_free(cmdbuf->buffer);
  
                return temp;
        }
  
        /* microcode_version != r300 */
-       while (cmdbuf->bufsz >= sizeof(header)) {
+       while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
  
-               header.i = *(int *)cmdbuf->buf;
-               cmdbuf->buf += sizeof(header);
-               cmdbuf->bufsz -= sizeof(header);
+               drm_radeon_cmd_header_t *header;
+               header = drm_buffer_read_object(cmdbuf->buffer,
+                               sizeof(stack_header), &stack_header);
  
-               switch (header.header.cmd_type) {
+               switch (header->header.cmd_type) {
                case RADEON_CMD_PACKET:
                        DRM_DEBUG("RADEON_CMD_PACKET\n");
                        if (radeon_emit_packets
-                           (dev_priv, file_priv, header, cmdbuf)) {
+                           (dev_priv, file_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_packets failed\n");
                                goto err;
                        }
  
                case RADEON_CMD_SCALARS:
                        DRM_DEBUG("RADEON_CMD_SCALARS\n");
-                       if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
+                       if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_scalars failed\n");
                                goto err;
                        }
  
                case RADEON_CMD_VECTORS:
                        DRM_DEBUG("RADEON_CMD_VECTORS\n");
-                       if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
+                       if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_vectors failed\n");
                                goto err;
                        }
  
                case RADEON_CMD_DMA_DISCARD:
                        DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
-                       idx = header.dma.buf_idx;
+                       idx = header->dma.buf_idx;
                        if (idx < 0 || idx >= dma->buf_count) {
                                DRM_ERROR("buffer index %d (of %d max)\n",
                                          idx, dma->buf_count - 1);
  
                case RADEON_CMD_SCALARS2:
                        DRM_DEBUG("RADEON_CMD_SCALARS2\n");
-                       if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
+                       if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_scalars2 failed\n");
                                goto err;
                        }
  
                case RADEON_CMD_WAIT:
                        DRM_DEBUG("RADEON_CMD_WAIT\n");
-                       if (radeon_emit_wait(dev, header.wait.flags)) {
+                       if (radeon_emit_wait(dev, header->wait.flags)) {
                                DRM_ERROR("radeon_emit_wait failed\n");
                                goto err;
                        }
                        break;
                case RADEON_CMD_VECLINEAR:
                        DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
-                       if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
+                       if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
                                DRM_ERROR("radeon_emit_veclinear failed\n");
                                goto err;
                        }
                        break;
  
                default:
-                       DRM_ERROR("bad cmd_type %d at %p\n",
-                                 header.header.cmd_type,
-                                 cmdbuf->buf - sizeof(header));
+                       DRM_ERROR("bad cmd_type %d at byte %d\n",
+                                 header->header.cmd_type,
+                                 cmdbuf->buffer->iterator);
                        goto err;
                }
        }
  
-       if (orig_bufsz != 0)
-               kfree(kbuf);
+       if (cmdbuf->bufsz != 0)
+               drm_buffer_free(cmdbuf->buffer);
  
        DRM_DEBUG("DONE\n");
        COMMIT_RING();
        return 0;
  
        err:
-       if (orig_bufsz != 0)
-               kfree(kbuf);
+       if (cmdbuf->bufsz != 0)
+               drm_buffer_free(cmdbuf->buffer);
        return -EINVAL;
  }
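
The radeon_state.c changes above all follow one pattern: the user-space command stream is copied once into a paged drm_buffer and then parsed with the accessors visible in these hunks (drm_buffer_alloc, drm_buffer_copy_from_user, drm_buffer_unprocessed, drm_buffer_read_object, drm_buffer_pointer_to_dword, drm_buffer_advance, drm_buffer_free). The sketch below only illustrates that lifecycle and is not driver code: the example_parse() wrapper, the my_header struct and the sanity checks are made up, and the helper signatures are assumed to match their use in the hunks above.

#include "drmP.h"
#include "drm_buffer.h"

struct my_header {
	u32 body_dwords;	/* illustrative: dwords that follow the header */
};

static int example_parse(void __user *user_data, int bufsz)
{
	struct drm_buffer *buffer;
	struct my_header stack_header, *header;
	u32 *dword1;
	int rv;

	/* Copy the whole user command stream into a paged kernel buffer. */
	rv = drm_buffer_alloc(&buffer, bufsz);
	if (rv)
		return rv;
	rv = drm_buffer_copy_from_user(buffer, user_data, bufsz);
	if (rv) {
		drm_buffer_free(buffer);
		return rv;
	}

	/* Walk the stream one header at a time, as radeon_cp_cmdbuf() does. */
	while (drm_buffer_unprocessed(buffer) >= sizeof(stack_header)) {
		/* Returns a pointer to the object; copies via stack_header
		 * when the object straddles a page boundary. */
		header = drm_buffer_read_object(buffer,
				sizeof(stack_header), &stack_header);

		if (header->body_dwords * 4 > drm_buffer_unprocessed(buffer)) {
			drm_buffer_free(buffer);
			return -EINVAL;
		}
		if (header->body_dwords >= 2) {
			/* Peek at dword 1 of the body without consuming it;
			 * a real parser would validate or patch it here. */
			dword1 = drm_buffer_pointer_to_dword(buffer, 1);
			*dword1 &= ~0x3;
		}

		/* Consume the packet body. */
		drm_buffer_advance(buffer, header->body_dwords * 4);
	}

	drm_buffer_free(buffer);
	return 0;
}

The point of the conversion is visible in radeon_cp_cmdbuf(): the stream is no longer pulled in with a single kmalloc of cmdbuf->bufsz bytes, so a large command buffer no longer depends on one big contiguous allocation succeeding.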
  
index 58b5adf974ca8bac6434025e3962443af28c0d3b,1157e0f758fa17588953ec6aeaabfa0a577af063..43c5ab34b634bf78383b940e3e2bafdda4c5d89e
@@@ -150,7 -150,7 +150,7 @@@ static int radeon_init_mem_type(struct 
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
-               man->gpu_offset = rdev->mc.gtt_location;
+               man->gpu_offset = rdev->mc.gtt_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
-               man->gpu_offset = rdev->mc.vram_location;
+               man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
@@@ -262,10 -262,10 +262,10 @@@ static int radeon_move_blit(struct ttm_
  
        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
-               old_start += rdev->mc.vram_location;
+               old_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
-               old_start += rdev->mc.gtt_location;
+               old_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
        }
        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
-               new_start += rdev->mc.vram_location;
+               new_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
-               new_start += rdev->mc.gtt_location;
+               new_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@@ -409,7 -409,7 +409,7 @@@ static int radeon_bo_move(struct ttm_bu
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
 -              /* bind is enought */
 +              /* bind is enough */
                radeon_move_null(bo, new_mem);
                return 0;
        }
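
The radeon_ttm.c hunk only tracks a field rename in struct radeon_mc (vram_location/gtt_location become vram_start/gtt_start elsewhere in this merge); the arithmetic is unchanged: a buffer's GPU address is its page offset inside the placement domain plus that domain's start in the GPU's memory-controller address space. The helper below is purely illustrative of that math, assuming the field names used above (rdev->mc.vram_start, rdev->mc.gtt_start) and this kernel's ttm_mem_reg layout (mm_node->start counted in pages); radeon_mem_gpu_addr() is not a driver function.

#include "radeon.h"

/* Mirrors the old_start/new_start computation in radeon_move_blit() above. */
static u64 radeon_mem_gpu_addr(struct radeon_device *rdev,
			       struct ttm_mem_reg *mem)
{
	u64 start = (u64)mem->mm_node->start << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_VRAM:
		return start + rdev->mc.vram_start;	/* on-card VRAM aperture */
	case TTM_PL_TT:
		return start + rdev->mc.gtt_start;	/* GART aperture */
	default:
		return start;	/* system memory, no aperture offset */
	}
}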