1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright © 2006, Intel Corporation.
7 #include <linux/types.h>
9 #include <mach/hardware.h>
10 #include <asm/hardware/iop_adma.h>
12 /* Memory copy units */
/*
 * NOTE(review): this file is a line-numbered dump with gaps in the
 * original numbering (missing lines); treat every definition below as
 * potentially partial and confirm against the upstream header.
 *
 * DMA channel MMIO register offsets, relative to the channel's mapped
 * register base (chan->mmr_base): control, status, descriptor address,
 * next-descriptor address, PCI/local addresses, byte count, control.
 */
13 #define DMA_CCR(chan) (chan->mmr_base + 0x0)
14 #define DMA_CSR(chan) (chan->mmr_base + 0x4)
15 #define DMA_DAR(chan) (chan->mmr_base + 0xc)
16 #define DMA_NDAR(chan) (chan->mmr_base + 0x10)
17 #define DMA_PADR(chan) (chan->mmr_base + 0x14)
18 #define DMA_PUADR(chan) (chan->mmr_base + 0x18)
19 #define DMA_LADR(chan) (chan->mmr_base + 0x1c)
20 #define DMA_BCR(chan) (chan->mmr_base + 0x20)
21 #define DMA_DCR(chan) (chan->mmr_base + 0x24)
23 /* Application accelerator unit */
24 #define AAU_ACR(chan) (chan->mmr_base + 0x0)
25 #define AAU_ASR(chan) (chan->mmr_base + 0x4)
26 #define AAU_ADAR(chan) (chan->mmr_base + 0x8)
27 #define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
/* AAU source-address registers: one word per source, starting at 0x10. */
28 #define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
29 #define AAU_DAR(chan) (chan->mmr_base + 0x20)
30 #define AAU_ABCR(chan) (chan->mmr_base + 0x24)
31 #define AAU_ADCR(chan) (chan->mmr_base + 0x28)
/*
 * NOTE(review): AAU_SAR_EDCR has no 'chan' parameter — it relies on a
 * variable literally named 'chan' being in scope at every expansion
 * site. Fragile, but matches the original header's convention.
 */
32 #define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
/* Indices of the extended descriptor control (EDCR) words in src_edc[]. */
33 #define AAU_EDCR0_IDX 8
34 #define AAU_EDCR1_IDX 17
35 #define AAU_EDCR2_IDX 26
/*
 * Bitfield overlays for the hardware descriptor-control words.
 * NOTE(review): the closing '};' of each struct falls in a numbering
 * gap of this dump — the declarations are incomplete as shown here.
 * Bitfield layout assumes the compiler packs LSB-first for this ABI;
 * these structs are accessed via unions with a raw u32 elsewhere in
 * the file.
 */
41 struct iop3xx_aau_desc_ctrl {
/* int_en: raise a completion interrupt for this descriptor. */
42 unsigned int int_en:1;
/* blkN_cmd_ctrl: per-source-block command code (e.g. XOR, fill). */
43 unsigned int blk1_cmd_ctrl:3;
44 unsigned int blk2_cmd_ctrl:3;
45 unsigned int blk3_cmd_ctrl:3;
46 unsigned int blk4_cmd_ctrl:3;
47 unsigned int blk5_cmd_ctrl:3;
48 unsigned int blk6_cmd_ctrl:3;
49 unsigned int blk7_cmd_ctrl:3;
50 unsigned int blk8_cmd_ctrl:3;
/* blk_ctrl: selects mini-descriptor / EDCR usage (see init_xor below). */
51 unsigned int blk_ctrl:2;
52 unsigned int dual_xor_en:1;
/* tx_complete / zero_result_*: status written back by the AAU. */
53 unsigned int tx_complete:1;
54 unsigned int zero_result_err:1;
55 unsigned int zero_result_en:1;
/* dest_write_en: 0 for zero-sum (check-only) ops, 1 when writing out. */
56 unsigned int dest_write_en:1;
/* Extended descriptor control word: block commands for sources 9+. */
59 struct iop3xx_aau_e_desc_ctrl {
60 unsigned int reserved:1;
61 unsigned int blk1_cmd_ctrl:3;
62 unsigned int blk2_cmd_ctrl:3;
63 unsigned int blk3_cmd_ctrl:3;
64 unsigned int blk4_cmd_ctrl:3;
65 unsigned int blk5_cmd_ctrl:3;
66 unsigned int blk6_cmd_ctrl:3;
67 unsigned int blk7_cmd_ctrl:3;
68 unsigned int blk8_cmd_ctrl:3;
69 unsigned int reserved2:7;
/* DMA (memcpy) channel descriptor-control word. */
72 struct iop3xx_dma_desc_ctrl {
/* pci_transaction: PCI command code (0xe = memory read block below). */
73 unsigned int pci_transaction:4;
74 unsigned int int_en:1;
75 unsigned int dac_cycle_en:1;
76 unsigned int mem_to_mem_en:1;
77 unsigned int crc_data_tx_en:1;
78 unsigned int crc_gen_en:1;
79 unsigned int crc_seed_dis:1;
80 unsigned int reserved:21;
81 unsigned int crc_tx_complete:1;
/*
 * In-memory hardware descriptor layouts consumed by the DMA and AAU
 * engines, plus the union used to view a generic slot as any of them.
 * NOTE(review): most members (next_desc, byte_count, src/dest addrs,
 * raw desc_ctrl words, the anonymous unions, and all closing braces)
 * fall in numbering gaps of this dump — only a fraction of each
 * struct is visible here.
 */
84 struct iop3xx_desc_dma {
92 u32 upper_pci_src_addr;
93 u32 upper_pci_dest_addr;
96 u32 local_pci_src_addr;
97 u32 local_pci_dest_addr;
103 struct iop3xx_dma_desc_ctrl desc_ctrl_field;
108 struct iop3xx_desc_aau {
115 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
120 struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
/* Galois-field multiplier bytes (one per source) for P+Q descriptors. */
124 struct iop3xx_aau_gfmr {
125 unsigned int gfmr1:8;
126 unsigned int gfmr2:8;
127 unsigned int gfmr3:8;
128 unsigned int gfmr4:8;
131 struct iop3xx_desc_pq_xor {
136 struct iop3xx_aau_gfmr data_mult1_field;
142 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
147 struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
149 struct iop3xx_aau_gfmr data_mult_field;
154 struct iop3xx_desc_dual_xor {
164 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
/*
 * Members of 'union iop3xx_desc' (the union's own declaration line is
 * missing from this dump): typed views over a slot's hw_desc memory.
 */
170 struct iop3xx_desc_aau *aau;
171 struct iop3xx_desc_dma *dma;
172 struct iop3xx_desc_pq_xor *pq_xor;
173 struct iop3xx_desc_dual_xor *dual_xor;
177 /* No support for p+q operations */
/*
 * Stubs for the P+Q (RAID-6) portion of the iop_adma interface.
 * IOP3xx hardware cannot do P+Q, so these exist only to satisfy the
 * shared iop_adma driver API. NOTE(review): the function bodies
 * (presumably BUG() / trivial returns) and the static-inline
 * qualifiers are in numbering gaps of this dump.
 */
179 iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
186 iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
193 iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
199 iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
200 dma_addr_t addr, unsigned char coef)
206 iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
213 iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
220 iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
/* zero-sum source programming is identical to the plain pq variant */
225 #define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
228 iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
/* Capability queries used by the dmaengine core; bodies not visible. */
234 static inline int iop_adma_get_max_xor(void)
239 static inline int iop_adma_get_max_pq(void)
/*
 * Read the engine's current descriptor pointer. Dispatches on the
 * device id: DMA channels use DMA_DAR, the AAU uses AAU_ADAR.
 * NOTE(review): the switch/case scaffolding, default (BUG) path and
 * closing braces are in numbering gaps of this dump.
 */
245 static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
247 int id = chan->device->id;
252 return __raw_readl(DMA_DAR(chan));
254 return __raw_readl(AAU_ADAR(chan));
/*
 * Program the engine's next-descriptor register (DMA_NDAR or
 * AAU_ANDAR depending on device id) with a bus address.
 */
261 static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
264 int id = chan->device->id;
269 __raw_writel(next_desc_addr, DMA_NDAR(chan));
272 __raw_writel(next_desc_addr, AAU_ANDAR(chan));
/* Channel-active bit in the status register, and per-op byte limits. */
278 #define IOP_ADMA_STATUS_BUSY (1 << 10)
/* zero-sum is limited to 1KB per descriptor; xor/copy to 16MB. */
279 #define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
280 #define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
281 #define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
/* Return 1 while the channel's BUSY status bit is set, else 0. */
283 static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
285 u32 status = __raw_readl(DMA_CSR(chan));
286 return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
/*
 * A descriptor group must start on a slot index that is a multiple of
 * its size; power-of-two masking works because num_slots is 1/2/4/8.
 */
289 static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
292 /* num_slots will only ever be 1, 2, 4, or 8 */
293 return (desc->idx & (num_slots - 1)) ? 0 : 1;
/*
 * Slot-count helpers for memcpy/memset; bodies (single-slot return)
 * are in numbering gaps of this dump.
 */
296 /* to do: support large (i.e. > hw max) buffer sizes */
297 static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
303 /* to do: support large (i.e. > hw max) buffer sizes */
304 static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
/*
 * Map an xor source count (1..32) to the number of descriptor slots a
 * single operation needs: 1-4 srcs fit one slot, 5-8 need 2, 9-16
 * need 4, 17-32 need 8 (extra slots hold the extended src words).
 * Returns the value and stores it in *slots_per_op.
 */
310 static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
313 static const char slot_count_table[] = {
314 1, 1, 1, 1, /* 01 - 04 */
315 2, 2, 2, 2, /* 05 - 08 */
316 4, 4, 4, 4, /* 09 - 12 */
317 4, 4, 4, 4, /* 13 - 16 */
318 8, 8, 8, 8, /* 17 - 20 */
319 8, 8, 8, 8, /* 21 - 24 */
320 8, 8, 8, 8, /* 25 - 28 */
321 8, 8, 8, 8, /* 29 - 32 */
/* src_cnt is 1-based; table is 0-based. Caller must keep src_cnt <= 32. */
323 *slots_per_op = slot_count_table[src_cnt - 1];
324 return *slots_per_op;
/*
 * Slot count for a pure-interrupt (null) descriptor: a zero-length
 * memcpy on DMA channels, a 2-source zero-length xor on the AAU.
 * NOTE(review): case labels / default of the switch are in numbering
 * gaps of this dump.
 */
328 iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
330 switch (chan->device->id) {
333 return iop_chan_memcpy_slot_count(0, slots_per_op);
335 return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
/*
 * Total slots for an xor over 'len' bytes: one base group of
 * *slots_per_op, plus one more group per additional
 * IOP_ADMA_XOR_MAX_BYTE_COUNT chunk (descriptors are chained when the
 * buffer exceeds the 16MB hardware limit).
 * NOTE(review): the early 'return slot_cnt;' after the <= test and
 * the final return are in numbering gaps of this dump.
 */
342 static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
345 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
347 if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
350 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
351 while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
352 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
353 slot_cnt += *slots_per_op;
/* account for the final (partial) chunk */
356 slot_cnt += *slots_per_op;
361 /* zero sum on iop3xx is limited to 1k at a time so it requires multiple
/*
 * Same chunking logic as iop_chan_xor_slot_count, but with the much
 * smaller 1KB zero-sum per-descriptor limit.
 */
364 static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
367 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
369 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
372 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
373 while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
374 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
375 slot_cnt += *slots_per_op;
378 slot_cnt += *slots_per_op;
/*
 * Read back the byte count from a completed descriptor, using the
 * dma or aau view of the slot depending on the channel's device id.
 */
383 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
384 struct iop_adma_chan *chan)
386 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
388 switch (chan->device->id) {
391 return hw_desc.dma->byte_count;
393 return hw_desc.aau->byte_count;
400 /* translate the src_idx to a descriptor word index */
/*
 * Sources 0-3 live in src[]; higher sources live in src_edc[], whose
 * layout is interleaved with EDCR words, hence the lookup table.
 * NOTE(review): most table rows are in numbering gaps of this dump.
 */
401 static inline int __desc_idx(int src_idx)
403 static const int desc_idx_table[] = { 0, 0, 0, 0,
413 return desc_idx_table[src_idx];
/*
 * Read a source address back from a descriptor: the DMA view has a
 * single src_addr; the AAU view uses src[] for low indices and
 * src_edc[] (via __desc_idx) for high ones.
 */
416 static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
417 struct iop_adma_chan *chan,
420 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
422 switch (chan->device->id) {
425 return hw_desc.dma->src_addr;
433 return hw_desc.aau->src[src_idx];
435 return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
/* Write a source address into an AAU descriptor (mirror of the read). */
438 static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
439 int src_idx, dma_addr_t addr)
442 hw_desc->src[src_idx] = addr;
444 hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
/*
 * Initialize a DMA descriptor for a memory-to-memory copy. Builds the
 * control word via a u32/bitfield union, enabling the completion
 * interrupt only when DMA_PREP_INTERRUPT is set in flags.
 * NOTE(review): the union declaration's 'u32 value;' arm and the
 * function's braces are in numbering gaps of this dump.
 */
448 iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
450 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
453 struct iop3xx_dma_desc_ctrl field;
456 u_desc_ctrl.value = 0;
457 u_desc_ctrl.field.mem_to_mem_en = 1;
458 u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
459 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
460 hw_desc->desc_ctrl = u_desc_ctrl.value;
/* unused PCI fields must be cleared for a local mem-to-mem copy */
461 hw_desc->upper_pci_src_addr = 0;
462 hw_desc->crc_addr = 0;
/*
 * Initialize an AAU descriptor for a memory block fill (memset):
 * block-1 command 0x2 = fill, with the destination write enabled.
 */
466 iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
468 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
471 struct iop3xx_aau_desc_ctrl field;
474 u_desc_ctrl.value = 0;
475 u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
476 u_desc_ctrl.field.dest_write_en = 1;
477 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
478 hw_desc->desc_ctrl = u_desc_ctrl.value;
/*
 * Build the control word(s) for an xor over src_cnt sources and
 * return the raw value (so zero-sum init can reuse and tweak it).
 *
 * Works down through the source-count tiers: >24 sources program
 * EDCR2, >16 program EDCR1, >8 program EDCR0, and the low 8 sources
 * set xor-enable bits directly in the main control word. blk_ctrl
 * records which extended words are in use (0x1 mini-desc, 0x2 EDCR0,
 * 0x3 EDCR[2:0]).
 *
 * NOTE(review): the switch-on-(src_cnt ranges) scaffolding, the
 * 'edcr'/'shift' declarations and shift updates, and the closing
 * braces are in numbering gaps of this dump — the tier structure
 * described above is inferred from the visible fragments and should
 * be confirmed upstream.
 */
482 iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
489 struct iop3xx_aau_desc_ctrl field;
492 u_desc_ctrl.value = 0;
495 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
498 for (i = 24; i < src_cnt; i++) {
499 edcr |= (1 << shift);
502 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
506 if (!u_desc_ctrl.field.blk_ctrl) {
507 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
508 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
512 for (i = 16; i < src_cnt; i++) {
513 edcr |= (1 << shift);
516 hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
520 if (!u_desc_ctrl.field.blk_ctrl)
521 u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
524 for (i = 8; i < src_cnt; i++) {
525 edcr |= (1 << shift);
528 hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
533 for (i = 0; i < src_cnt; i++) {
534 u_desc_ctrl.value |= (1 << shift);
538 if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
539 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
542 u_desc_ctrl.field.dest_write_en = 1;
543 u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
544 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
545 hw_desc->desc_ctrl = u_desc_ctrl.value;
547 return u_desc_ctrl.value;
/* Thin wrapper used by the generic driver paths. */
551 iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
554 iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
557 /* return the number of operations */
/*
 * Initialize a chain of zero-sum (xor-and-check) descriptors: each
 * sub-descriptor starts from the plain-xor control word, then turns
 * off the destination write and turns on zero-result checking.
 * Sub-descriptors after the first are linked via next_desc using the
 * slot's bus address (i << 5: each slot is 32 bytes).
 * NOTE(review): loop-exit bookkeeping, the hw_desc.ptr union setup
 * and the return statement are in numbering gaps of this dump.
 */
559 iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
562 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
563 struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
566 struct iop3xx_aau_desc_ctrl field;
570 hw_desc = desc->hw_desc;
/* one iteration per slots_per_op group; j counts operations */
572 for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
573 i += slots_per_op, j++) {
574 iter = iop_hw_desc_slot_idx(hw_desc, i);
575 u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
576 u_desc_ctrl.field.dest_write_en = 0;
577 u_desc_ctrl.field.zero_result_en = 1;
578 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
579 iter->desc_ctrl = u_desc_ctrl.value;
581 /* for the subsequent descriptors preserve the store queue
582 * and chain them together
586 iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
587 prev_hw_desc->next_desc =
588 (u32) (desc->async_tx.phys + (i << 5));
/*
 * Initialize a "null" xor descriptor: same blk_ctrl tier selection as
 * iop3xx_desc_init_xor but with all EDCR words zeroed and the
 * destination write disabled — used as a no-op/interrupt descriptor.
 * NOTE(review): the switch scaffolding on src_cnt and the closing
 * braces are in numbering gaps of this dump.
 */
596 iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
599 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
602 struct iop3xx_aau_desc_ctrl field;
605 u_desc_ctrl.value = 0;
608 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
609 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
612 if (!u_desc_ctrl.field.blk_ctrl) {
613 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
614 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
616 hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
619 if (!u_desc_ctrl.field.blk_ctrl)
620 u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
621 hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
624 if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
625 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
/* null op: nothing is written to memory */
628 u_desc_ctrl.field.dest_write_en = 0;
629 u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
630 hw_desc->desc_ctrl = u_desc_ctrl.value;
/*
 * Program a descriptor's byte count through the channel-appropriate
 * (dma vs aau) view of the slot.
 */
633 static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
634 struct iop_adma_chan *chan,
637 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
639 switch (chan->device->id) {
642 hw_desc.dma->byte_count = byte_count;
645 hw_desc.aau->byte_count = byte_count;
/*
 * Build a descriptor whose only purpose is to raise an interrupt:
 * a zero-length memcpy (DMA channels) or a zero-length 2-source null
 * xor (AAU), with all addresses cleared. The literal '1' passed as
 * flags sets DMA_PREP_INTERRUPT (bit 0).
 */
653 iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
654 struct iop_adma_chan *chan)
656 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
658 switch (chan->device->id) {
661 iop_desc_init_memcpy(desc, 1);
662 hw_desc.dma->byte_count = 0;
663 hw_desc.dma->dest_addr = 0;
664 hw_desc.dma->src_addr = 0;
667 iop_desc_init_null_xor(desc, 2, 1);
668 hw_desc.aau->byte_count = 0;
669 hw_desc.aau->dest_addr = 0;
670 hw_desc.aau->src[0] = 0;
671 hw_desc.aau->src[1] = 0;
/*
 * Spread 'len' across a zero-sum descriptor chain: each sub-descriptor
 * takes the 1KB maximum, the last takes the remainder. Single-slot
 * fast path when len fits in one descriptor.
 * NOTE(review): the 'int i = 0' declaration, do/while braces and the
 * i += slots_per_op increment are in numbering gaps of this dump.
 */
679 iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
681 int slots_per_op = desc->slots_per_op;
682 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
685 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
686 hw_desc->byte_count = len;
689 iter = iop_hw_desc_slot_idx(hw_desc, i);
690 iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
691 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
693 } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
/* the final descriptor carries the remainder */
695 iter = iop_hw_desc_slot_idx(hw_desc, i);
696 iter->byte_count = len;
/*
 * Program the destination address through the channel-appropriate
 * (dma vs aau) view of the slot.
 */
700 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
701 struct iop_adma_chan *chan,
704 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
706 switch (chan->device->id) {
709 hw_desc.dma->dest_addr = addr;
712 hw_desc.aau->dest_addr = addr;
/* Set the single source address of a DMA (memcpy) descriptor. */
719 static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
722 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
723 hw_desc->src_addr = addr;
/*
 * Program one source across every sub-descriptor of a zero-sum chain,
 * advancing the address by the 1KB per-descriptor limit each step so
 * consecutive descriptors cover consecutive chunks of the buffer.
 */
727 iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
731 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
732 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
735 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
736 i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
737 iter = iop_hw_desc_slot_idx(hw_desc, i);
738 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
/* Same chunked programming for xor chains, with the 16MB xor limit. */
742 static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
743 int src_idx, dma_addr_t addr)
746 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
747 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
750 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
751 i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
752 iter = iop_hw_desc_slot_idx(hw_desc, i);
753 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
/*
 * Link this descriptor to the next one in the hardware chain. The
 * iop_paranoia() check asserts the link is not already set (a set
 * next_desc would mean we are clobbering a live chain).
 */
757 static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
760 /* hw_desc->next_desc is the same location for all channels */
761 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
763 iop_paranoia(hw_desc.dma->next_desc);
764 hw_desc.dma->next_desc = next_desc_addr;
767 static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
769 /* hw_desc->next_desc is the same location for all channels */
770 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
771 return hw_desc.dma->next_desc;
774 static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
776 /* hw_desc->next_desc is the same location for all channels */
777 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
778 hw_desc.dma->next_desc = 0;
/* Block-fill value for memset descriptors is carried in src[0]. */
781 static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
784 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
785 hw_desc->src[0] = val;
/*
 * Read the zero-sum verdict from a completed descriptor. The paranoia
 * check asserts the op actually completed with checking enabled; the
 * error bit is shifted into the SUM_CHECK_P position of the
 * sum_check_flags result.
 */
788 static inline enum sum_check_flags
789 iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
791 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
792 struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
794 iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
795 return desc_ctrl.zero_result_err << SUM_CHECK_P;
/*
 * Kick the engine to (re)fetch the descriptor chain by setting bit 1
 * of the channel control register (chain resume).
 */
798 static inline void iop_chan_append(struct iop_adma_chan *chan)
802 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
803 dma_chan_ctrl |= 0x2;
804 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
/* Raw read of the channel status register. */
807 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
809 return __raw_readl(DMA_CSR(chan));
/*
 * Disable/enable the channel via the control register.
 * NOTE(review): the bit-clear (disable) and bit-set (enable) lines
 * between the read and write are in numbering gaps of this dump.
 */
812 static inline void iop_chan_disable(struct iop_adma_chan *chan)
814 u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
816 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
819 static inline void iop_chan_enable(struct iop_adma_chan *chan)
821 u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
824 __raw_writel(dma_chan_ctrl, DMA_CCR(chan))
827 static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
/*
 * The three clear_*_status helpers below acknowledge end-of-transfer,
 * end-of-chain and error conditions by writing the (masked) status
 * value back to DMA_CSR (write-1-to-clear semantics).
 * NOTE(review): the per-helper mask lines (e.g. 'status &= ...') for
 * eot/eoc are in numbering gaps of this dump.
 */
829 u32 status = __raw_readl(DMA_CSR(chan));
831 __raw_writel(status, DMA_CSR(chan));
834 static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
836 u32 status = __raw_readl(DMA_CSR(chan));
838 __raw_writel(status, DMA_CSR(chan));
841 static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
843 u32 status = __raw_readl(DMA_CSR(chan));
845 switch (chan->device->id) {
/* keep only the error bits this device class can report */
848 status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
857 __raw_writel(status, DMA_CSR(chan));
/*
 * Predicates decoding individual error bits out of a saved status
 * word. NOTE(review): the bodies of the first three (their test_bit
 * calls / default returns) and the switch scaffolding of the last two
 * are in numbering gaps of this dump.
 */
861 iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
867 iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
873 iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
879 iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
881 return test_bit(5, &status);
/* PCI target-abort is bit 2 — only meaningful on DMA channels. */
885 iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
887 switch (chan->device->id) {
890 return test_bit(2, &status);
/* PCI master-abort is bit 3 — only meaningful on DMA channels. */
897 iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
899 switch (chan->device->id) {
902 return test_bit(3, &status);
909 iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
911 switch (chan->device->id) {
914 return test_bit(1, &status);