drivers/infiniband/hw/cxgb3/iwch_cq.c
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"

/*
 * Get one cq entry from cxio and map it to openib.
 *
 * Returns:
 *      0                       CQ empty
 *      1                       cqe returned
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
                            struct ib_wc *wc)
{
        struct iwch_qp *qhp = NULL;
        struct t3_cqe cqe, *rd_cqe;
        struct t3_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie;
        int ret = 1;

        rd_cqe = cxio_next_cqe(&chp->cq);

        if (!rd_cqe)
                return 0;

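        /*
         * Look up the QP this CQE belongs to.  If the QP has already
         * been freed, poll with a NULL wq so the CQE is still consumed.
         */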
        qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
                           &credit);
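        /*
         * On T3A devices, return any CQ credits reported by
         * cxio_poll_cq() to the hardware with an explicit
         * credit-update operation.
         */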
        if (t3a_device(chp->rhp) && credit) {
                PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__,
                     credit, chp->cq.cqid);
                cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
        }

        if (ret) {
                ret = -EAGAIN;
                goto out;
        }
        ret = 1;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(cqe);

        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __FUNCTION__,
             CQE_QPID(cqe), CQE_TYPE(cqe),
             CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
             CQE_WRID_LOW(cqe), (unsigned long long) cookie);

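        /*
         * CQE_TYPE 0 is a receive completion; anything else is a
         * send-side completion whose T3 opcode is mapped to an ib_wc
         * opcode below.
         */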
        if (CQE_TYPE(cqe) == 0) {
                if (!CQE_STATUS(cqe))
                        wc->byte_len = CQE_LEN(cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
        } else {
                switch (CQE_OPCODE(cqe)) {
                case T3_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case T3_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(cqe);
                        break;
                case T3_SEND:
                case T3_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case T3_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;

                /* these aren't supported yet */
                case T3_SEND_WITH_INV:
                case T3_SEND_WITH_SE_INV:
                case T3_LOCAL_INV:
                case T3_FAST_REGISTER:
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(cqe), CQE_QPID(cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

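        /*
         * Map the T3 completion status onto an ib_wc status.  A flushed
         * CQE always reports IB_WC_WR_FLUSH_ERR, whatever its hardware
         * status.
         */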
        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {
                switch (CQE_STATUS(cqe)) {
                case TPT_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case TPT_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case TPT_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case TPT_ERR_QPID:
                case TPT_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case TPT_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case TPT_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case TPT_ERR_INVALIDATE_SHARED_MR:
                case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case TPT_ERR_CRC:
                case TPT_ERR_MARKER:
                case TPT_ERR_PDU_LEN_ERR:
                case TPT_ERR_OUT_OF_RQE:
                case TPT_ERR_DDP_VERSION:
                case TPT_ERR_RDMA_VERSION:
                case TPT_ERR_DDP_QUEUE_NUM:
                case TPT_ERR_MSN:
                case TPT_ERR_TBIT:
                case TPT_ERR_MO:
                case TPT_ERR_MSN_RANGE:
                case TPT_ERR_IRD_OVERFLOW:
                case TPT_ERR_OPCODE:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case TPT_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
                               "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
                        ret = -EINVAL;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct iwch_dev *rhp;
        struct iwch_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_iwch_cq(ibcq);
        rhp = chp->rhp;

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
#ifdef DEBUG
                int i = 0;
#endif

                /*
                 * Because T3 can post CQEs that are _not_ associated
                 * with a WR, we might have to poll again after removing
                 * one of these.
                 */
                do {
                        err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
                        BUG_ON(++i > 1000);
#endif
                } while (err == -EAGAIN);
                if (err <= 0)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);

        if (err < 0)
                return err;
        else
                return npolled;
}
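
/*
 * Illustrative sketch (not part of the original file): iwch_poll_cq()
 * above is reached by consumers through the generic ib_poll_cq() verb.
 * A caller typically reaps completions in a loop like the hypothetical
 * helper below; ib_poll_cq() returns the number of work completions
 * filled in, or a negative errno on a fatal error.
 */
static void example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc;

        /* Reap one completion at a time until the CQ is empty. */
        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status != IB_WC_SUCCESS)
                        printk(KERN_ERR MOD "CQE error: status %d "
                               "wr_id 0x%llx\n", wc.status,
                               (unsigned long long) wc.wr_id);
        }
}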