dmaengine: remove dma_async_memcpy_issue_pending() macro
author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Thu, 8 Nov 2012 09:59:54 +0000 (09:59 +0000)
committer: Vinod Koul <vinod.koul@intel.com>
Tue, 8 Jan 2013 06:05:09 +0000 (22:05 -0800)
Just use dma_async_issue_pending() directly.

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <djbw@fb.com>
drivers/misc/carma/carma-fpga-program.c
drivers/misc/carma/carma-fpga.c
include/linux/dmaengine.h
net/ipv4/tcp.c

index eaddfe9db149a8f070ec11e5ae78c4e8821f175d..736c7714f5657000a5b82380f2928a1b6c89fe7c 100644 (file)
@@ -546,7 +546,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
                goto out_dma_unmap;
        }
 
-       dma_async_memcpy_issue_pending(chan);
+       dma_async_issue_pending(chan);
 
        /* Set the total byte count */
        fpga_set_byte_count(priv->regs, priv->bytes);
index 6b43f8c7b3beed7a4afb1a4d7686a5f14b3f0289..7508cafff1039a4d984fae009eec21fdc5863763 100644 (file)
@@ -751,7 +751,7 @@ static irqreturn_t data_irq(int irq, void *dev_id)
        submitted = true;
 
        /* Start the DMA Engine */
-       dma_async_memcpy_issue_pending(priv->chan);
+       dma_async_issue_pending(priv->chan);
 
 out:
        /* If no DMA was submitted, re-enable interrupts */
index be6e95395b1159670ca16f325b2fd7e0f540dc26..cd15958d4d1d51e90159fbb9751f5277fcf9d630 100644 (file)
@@ -901,8 +901,6 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
        chan->device->device_issue_pending(chan);
 }
 
-#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
-
 /**
  * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
index 1ca253635f7acdda23624c28ae1ead4645bd0dd5..cf949a119a54e4f683599675ce8006e6d233ef21 100644 (file)
@@ -1406,7 +1406,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
                return;
 
        last_issued = tp->ucopy.dma_cookie;
-       dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+       dma_async_issue_pending(tp->ucopy.dma_chan);
 
        do {
                if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
@@ -1744,7 +1744,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                tcp_service_net_dma(sk, true);
                                tcp_cleanup_rbuf(sk, copied);
                        } else
-                               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+                               dma_async_issue_pending(tp->ucopy.dma_chan);
                }
 #endif
                if (copied >= target) {
@@ -1837,7 +1837,7 @@ do_prequeue:
                                        break;
                                }
 
-                               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+                               dma_async_issue_pending(tp->ucopy.dma_chan);
 
                                if ((offset + used) == skb->len)
                                        copied_early = true;