Merge tag 'dmaengine-5.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 003b753e46043fda880347243b92f00468186fef..a3f942a6a946ab7c40196f6152891b62302a5e3a 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/prefetch.h>
 #include <linux/memory.h>
 #include <linux/ioport.h>
 #include <linux/raid/pq.h>
 #include <linux/slab.h>
 
-#include <mach/adma.h>
-
+#include "iop-adma.h"
 #include "dmaengine.h"
 
 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
@@ -116,9 +116,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
        list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
                                        chain_node) {
                pr_debug("\tcookie: %d slot: %d busy: %d "
-                       "this_desc: %pad next_desc: %#x ack: %d\n",
+                       "this_desc: %pad next_desc: %#llx ack: %d\n",
                        iter->async_tx.cookie, iter->idx, busy,
-                       &iter->async_tx.phys, iop_desc_get_next_desc(iter),
+                       &iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
                        async_tx_test_ack(&iter->async_tx));
                prefetch(_iter);
                prefetch(&_iter->async_tx);
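
The %pad and (u64) cast changes in the hunk above follow the kernel printk convention for dma_addr_t, which may be 32 or 64 bits wide depending on CONFIG_ARCH_DMA_ADDR_T_64BIT: either pass the address of the dma_addr_t to %pad, or cast the value to u64 and print it with %llx. A minimal illustrative sketch of that convention (print_next_desc is a hypothetical helper, not code from this driver):

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper: print a descriptor address portably.
 * %pad takes a pointer to a dma_addr_t; an explicit (u64) cast
 * lets %#llx print the value regardless of dma_addr_t's width. */
static void print_next_desc(dma_addr_t this_desc, dma_addr_t next_desc)
{
	pr_debug("this_desc: %pad next_desc: %#llx\n",
		 &this_desc, (u64)next_desc);
}
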
@@ -306,9 +306,9 @@ retry:
                                int i;
                                dev_dbg(iop_chan->device->common.dev,
                                        "allocated slot: %d "
-                                       "(desc %p phys: %#x) slots_per_op %d\n",
+                                       "(desc %p phys: %#llx) slots_per_op %d\n",
                                        iter->idx, iter->hw_desc,
-                                       iter->async_tx.phys, slots_per_op);
+                                       (u64)iter->async_tx.phys, slots_per_op);
 
                                /* pre-ack all but the last descriptor */
                                if (num_slots != slots_per_op)
@@ -514,7 +514,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
                return NULL;
        BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 
-       dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
+       dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
                __func__, len);
 
        spin_lock_bh(&iop_chan->lock);
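
The %u to %zu conversions in this and the following hunks reflect that the len argument of these prep callbacks is a size_t, and %zu is the matching printk length modifier on both 32-bit and 64-bit builds. A minimal illustrative sketch (print_len is a hypothetical helper, not code from this driver):

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper: size_t lengths take the %zu specifier,
 * avoiding format warnings where size_t is unsigned long. */
static void print_len(const char *who, size_t len)
{
	pr_debug("%s len: %zu\n", who, len);
}
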
@@ -547,7 +547,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
        dev_dbg(iop_chan->device->common.dev,
-               "%s src_cnt: %d len: %u flags: %lx\n",
+               "%s src_cnt: %d len: %zu flags: %lx\n",
                __func__, src_cnt, len, flags);
 
        spin_lock_bh(&iop_chan->lock);
@@ -580,7 +580,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
        if (unlikely(!len))
                return NULL;
 
-       dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+       dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
                __func__, src_cnt, len);
 
        spin_lock_bh(&iop_chan->lock);
@@ -618,7 +618,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
        dev_dbg(iop_chan->device->common.dev,
-               "%s src_cnt: %d len: %u flags: %lx\n",
+               "%s src_cnt: %d len: %zu flags: %lx\n",
                __func__, src_cnt, len, flags);
 
        if (dmaf_p_disabled_continue(flags))
@@ -681,7 +681,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                return NULL;
        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 
-       dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+       dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
                __func__, src_cnt, len);
 
        spin_lock_bh(&iop_chan->lock);