.can_queue = MV_USE_Q_DEPTH,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = MV_MAX_SG_CT / 2, /* halved: mv_fill_sg() may split an entry at a 64KB boundary */
- .max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
* @base: port base address
* @pp: port private data
*
- * Verify the local cache of the eDMA state is accurate with an
- * assert.
+ * Verify the local cache of the eDMA state is accurate with a
+ * WARN_ON.
*
* LOCKING:
* Inherited from caller.
writelfl(EDMA_EN, base + EDMA_CMD_OFS);
pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
}
- assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
+ WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
* mv_stop_dma - Disable eDMA engine
* @ap: ATA channel to manipulate
*
- * Verify the local cache of the eDMA state is accurate with an
- * assert.
+ * Verify the local cache of the eDMA state is accurate with a
+ * WARN_ON.
*
* LOCKING:
* Inherited from caller.
writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
} else {
- assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
+ WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
}
/* now properly wait for the eDMA to stop */
pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
- pp->sg_tbl[i].flags_size = cpu_to_le32(len);
+ pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
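+ /* NOTE: the "(addr >> 16) >> 16" above splits the shift into two
+ * steps to avoid an undefined 32-bit shift when dma_addr_t is
+ * only 32 bits wide. The 0xffff mask reflects the 16-bit count
+ * field of the eDMA S/G entry, where a count of 0 presumably
+ * encodes a full 64KB segment.
+ */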
sg_len -= len;
addr += len;
return;
/* the req producer index should be the same as we remember it */
- assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
- EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
- pp->req_producer);
+ WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+ EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
+ pp->req_producer);
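+ /* The hardware in-pointer register embeds the ring index; the
+ * shift and mask above extract it for comparison with our
+ * cached copy.
+ */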
/* Fill in command request block
*/
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
- assert(MV_MAX_Q_DEPTH > qc->tag);
+ WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
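+ /* The queue tag is encoded into the CRQB flags word, presumably
+ * groundwork for matching responses to commands once NCQ lands.
+ */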
pp->crqb[pp->req_producer].sg_addr =
case ATA_CMD_READ_EXT:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_FUA_EXT:
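+ /* WRITE DMA FUA EXT is a 48-bit command, so it needs the same
+ * high-order sector count setup as the other read/write opcodes
+ * grouped here.
+ */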
mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
break;
#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
return;
/* the req producer index should be the same as we remember it */
- assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
- EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
- pp->req_producer);
+ WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+ EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
+ pp->req_producer);
/* Fill in Gen IIE command request block
*/
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
- assert(MV_MAX_Q_DEPTH > qc->tag);
+ WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
/* the req producer index should be the same as we remember it */
- assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
- pp->req_producer);
+ WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
+ pp->req_producer);
/* until we do queuing, the queue should be empty at this point */
- assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
- ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
- EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+ WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
+ ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
+ EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
mv_inc_q_index(&pp->req_producer); /* now incr producer index */
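+ /* mv_inc_q_index() presumably just wraps the ring index:
+ *	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
+ */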
*
* This routine is for use when the port is in DMA mode, when it
* will be using the CRPB (command response block) method of
- * returning command completion information. We assert indices
+ * returning command completion information. We check indices
* are good, grab status, and bump the response consumer index to
* prove that we're up to date.
*
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
u32 out_ptr;
+ u8 ata_status;
out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
/* the response consumer index should be the same as we remember it */
- assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
- pp->rsp_consumer);
+ WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
+ pp->rsp_consumer);
+
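+ /* Grab the status *before* bumping rsp_consumer below; once the
+ * index moves on, this CRPB slot no longer refers to the command
+ * that just completed.
+ */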
+ ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;
/* increment our consumer index... */
pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
/* and, until we do NCQ, there should only be 1 CRPB waiting */
- assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
- EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
- pp->rsp_consumer);
+ WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
+ EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
+ pp->rsp_consumer);
/* write out our inc'd consumer index so EDMA knows we're caught up */
out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
/* Return ATA status register for completed CRPB */
- return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
+ return ata_status;
}
/**
{
void __iomem *mmio = host_set->mmio_base;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
- struct ata_port *ap;
struct ata_queued_cmd *qc;
u32 hc_irq_cause;
int shift, port, port0, hard_port, handled;
unsigned int err_mask;
- u8 ata_status = 0;
if (hc == 0) {
port0 = 0;
hc,relevant,hc_irq_cause);
for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
- ap = host_set->ports[port];
+ u8 ata_status = 0;
+ struct ata_port *ap = host_set->ports[port];
+ struct mv_port_priv *pp = ap->private_data;
+
hard_port = port & MV_PORT_MASK; /* range 0-3 */
- handled = 0; /* ensure ata_status is set if handled++ */
+ handled = 0; /* ensure ata_status is set if handled = 1 */
- if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
- /* new CRPB on the queue; just one at a time until NCQ
- */
- ata_status = mv_get_crpb_status(ap);
- handled++;
- } else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
- /* received ATA IRQ; read the status reg to clear INTRQ
- */
- ata_status = readb((void __iomem *)
+ /* Note that DEV_IRQ might happen spuriously during EDMA,
+ * and should be ignored in such cases. We could mask it,
+ * but it's pretty rare and may not be worth the overhead.
+ */
+ if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+ /* EDMA: check for response queue interrupt */
+ if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
+ ata_status = mv_get_crpb_status(ap);
+ handled = 1;
+ }
+ } else {
+ /* PIO: check for device (drive) interrupt */
+ if ((DEV_IRQ << hard_port) & hc_irq_cause) {
+ ata_status = readb((void __iomem *)
ap->ioaddr.status_addr);
- handled++;
+ handled = 1;
+ }
}
- if (ap &&
- (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))
+ if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))
continue;
err_mask = ac_err_mask(ata_status);
if ((PORT0_ERR << shift) & relevant) {
mv_err_intr(ap);
err_mask |= AC_ERR_OTHER;
- handled++;
+ handled = 1;
}
- if (handled && ap) {
+ if (handled) {
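+ /* Complete the qc only if libata still considers it active;
+ * finishing an inactive or stale tag here would complete the
+ * same command twice.
+ */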
qc = ata_qc_from_tag(ap, ap->active_tag);
- if (NULL != qc) {
+ if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
VPRINTK("port %u IRQ found for qc, "
"ata_status 0x%x\n", port,ata_status);
/* mark qc status appropriately */
mv_err_intr(ap);
mv_stop_and_reset(ap);
- if (!qc) {
- printk(KERN_ERR "ata%u: BUG: timeout without command\n",
- ap->id);
- } else {
- qc->err_mask |= AC_ERR_TIMEOUT;
- ata_eh_qc_complete(qc);
- }
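+ /* qc is assumed non-NULL here: the timeout path should only be
+ * entered with an active command outstanding.
+ */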
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ ata_eh_qc_complete(qc);
}
/**