/*
 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

/* from cxgb3 LLD */
#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"

#include "cxgb3i_ddp.h"

#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...)  printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...)  printk(KERN_INFO "cxgb3i_ddp: " fmt)

#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
        printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif

/*
 * iSCSI Direct Data Placement
 *
 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
 * pre-posted final destination host-memory buffers based on the Initiator
 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 *
 * The host memory address is programmed into h/w in the format of pagepod
 * entries. The location of the pagepod entry is encoded into the ddp tag,
 * which is then used as, or as the base of, the ITT/TTT.
 */
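
/*
 * A rough sketch of how a ddp tag is built (the exact bit split comes from
 * struct cxgb3i_tag_format at runtime; see cxgb3i_ddp_tag_reserve() below):
 *
 *      tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
 *      tag |= idx << PPOD_IDX_SHIFT;
 *
 * so that the pagepod index can later be recovered from the ITT/TTT with
 *
 *      idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
 *
 * as done in cxgb3i_ddp_tag_release().
 */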

#define DDP_PGIDX_MAX           4
#define DDP_THRESHOLD           2048
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;

/*
 * functions to program the pagepod in h/w
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
        struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

        req->wr.wr_lo = 0;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
        req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
                                   V_ULPTX_CMD(ULP_MEM_WRITE));
        req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
                         V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}

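/*
 * set_ddp_map - program pagepod entries into adapter memory
 * @ddp: adapter ddp info
 * @hdr: pagepod header template shared by all the pods of this mapping
 * @idx: index of the first pagepod to program
 * @npods: # of pagepods
 * @gl: page buffer list supplying the dma addresses
 *
 * Each pod is sent to h/w as a ULP_MEM_WRITE work request on its
 * pre-allocated skb. A pod carries four page addresses (PPOD_PAGES_MAX)
 * plus one overlap entry, so consecutive pods share one page address.
 */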
static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
                       unsigned int idx, unsigned int npods,
                       struct cxgb3i_gather_list *gl)
{
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = ddp->gl_skb[idx];
                struct pagepod *ppod;
                int j, pidx;

                /* hold on to the skb until we clear the ddp mapping */
                skb_get(skb);

                ulp_mem_io_set_hdr(skb, pm_addr);
                ppod = (struct pagepod *)
                       (skb->head + sizeof(struct ulp_mem_io));
                /* copy only the header; the addr[] slots are filled below */
                memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod_hdr));
                for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
                        ppod->addr[j] = pidx < gl->nelem ?
                                     cpu_to_be64(gl->phys_addr[pidx]) : 0UL;

                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(ddp->tdev, skb);
        }
        return 0;
}

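/*
 * clear_ddp_map - zero out the pagepod entries of a mapping in adapter
 * memory and release the references held on the per-pod skbs.
 */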
static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag,
                         unsigned int idx, unsigned int npods)
{
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = ddp->gl_skb[idx];

                if (!skb) {
                        ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
                                        tag, idx, i, npods);
                        continue;
                }
                ddp->gl_skb[idx] = NULL;
                memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
                ulp_mem_io_set_hdr(skb, pm_addr);
                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(ddp->tdev, skb);
        }
}

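/*
 * ddp_find_unused_entries - claim a run of @count consecutive free pagepod
 * slots within [@start, @max) and mark them with @gl.
 * Returns the index of the first slot of the run, or -EBUSY if no such run
 * exists.
 */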
static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
                                          unsigned int start, unsigned int max,
                                          unsigned int count,
                                          struct cxgb3i_gather_list *gl)
{
        unsigned int i, j, k;

        /* not enough entries */
        if ((max - start) < count)
                return -EBUSY;

        max -= count;
        spin_lock(&ddp->map_lock);
        /* i == max is still a valid start for a run of @count slots */
        for (i = start; i <= max;) {
                for (j = 0, k = i; j < count; j++, k++) {
                        if (ddp->gl_map[k])
                                break;
                }
                if (j == count) {
                        for (j = 0, k = i; j < count; j++, k++)
                                ddp->gl_map[k] = gl;
                        spin_unlock(&ddp->map_lock);
                        return i;
                }
                i += j + 1;
        }
        spin_unlock(&ddp->map_lock);
        return -EBUSY;
}

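/*
 * ddp_unmark_entries - release the slots claimed by a previous
 * ddp_find_unused_entries() call.
 */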
static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
                                      int start, int count)
{
        spin_lock(&ddp->map_lock);
        memset(&ddp->gl_map[start], 0,
               count * sizeof(struct cxgb3i_gather_list *));
        spin_unlock(&ddp->map_lock);
}

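/*
 * ddp_free_gl_skb - free the pre-allocated pagepod write-request skbs of
 * the slots [@idx, @idx + @count).
 */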
static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
                                   int idx, int count)
{
        int i;

        for (i = 0; i < count; i++, idx++)
                if (ddp->gl_skb[idx]) {
                        kfree_skb(ddp->gl_skb[idx]);
                        ddp->gl_skb[idx] = NULL;
                }
}

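/*
 * ddp_alloc_gl_skb - pre-allocate one skb per pagepod slot, each large
 * enough for a ULP_MEM_WRITE request (header plus one pod), so that
 * clearing the mapping later cannot fail on memory allocation.
 */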
static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
                                   int count, gfp_t gfp)
{
        int i;

        for (i = 0; i < count; i++) {
                struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
                                                PPOD_SIZE, gfp);
                if (skb) {
                        ddp->gl_skb[idx + i] = skb;
                        skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
                } else {
                        ddp_free_gl_skb(ddp, idx, i);
                        return -ENOMEM;
                }
        }
        return 0;
}

/**
 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
 * @pgsz: page size
 *
 * Returns the ddp page index; if no match is found, returns DDP_PGIDX_MAX.
 */
int cxgb3i_ddp_find_page_index(unsigned long pgsz)
{
        int i;

        for (i = 0; i < DDP_PGIDX_MAX; i++) {
                if (pgsz == (1UL << ddp_page_shift[i]))
                        return i;
        }
        ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
        return DDP_PGIDX_MAX;
}

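/*
 * ddp_gl_map()/ddp_gl_unmap() - pci dma map/unmap helpers for a gather
 * list. ddp_gl_map() returns the # of pages mapped, or -ENOMEM after
 * unwinding any partial mapping.
 */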
static inline void ddp_gl_unmap(struct pci_dev *pdev,
                                struct cxgb3i_gather_list *gl)
{
        int i;

        for (i = 0; i < gl->nelem; i++)
                pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
}

static inline int ddp_gl_map(struct pci_dev *pdev,
                             struct cxgb3i_gather_list *gl)
{
        int i;

        for (i = 0; i < gl->nelem; i++) {
                gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
                                                PAGE_SIZE,
                                                PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
                        goto unmap;
        }

        return i;

unmap:
        if (i) {
                unsigned int nelem = gl->nelem;

                gl->nelem = i;
                ddp_gl_unmap(pdev, gl);
                gl->nelem = nelem;
        }
        return -ENOMEM;
}

/**
 * cxgb3i_ddp_make_gl - build ddp page buffer list
 * @xferlen: total buffer length
 * @sgl: page buffer scatter-gather list
 * @sgcnt: # of page buffers
 * @pdev: pci_dev, used for pci map
 * @gfp: allocation mode
 *
 * construct a ddp page buffer list from the scsi scatter-gather list.
 * coalesce buffers as much as possible, and obtain dma addresses for
 * each page.
 *
 * Return the cxgb3i_gather_list constructed from the page buffers if the
 * memory can be used for ddp. Return NULL otherwise.
 */
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
                                              struct scatterlist *sgl,
                                              unsigned int sgcnt,
                                              struct pci_dev *pdev,
                                              gfp_t gfp)
{
        struct cxgb3i_gather_list *gl;
        struct scatterlist *sg = sgl;
        struct page *sgpage = sg_page(sg);
        unsigned int sglen = sg->length;
        unsigned int sgoffset = sg->offset;
        unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
                              PAGE_SHIFT;
        int i = 1, j = 0;

        if (xferlen < DDP_THRESHOLD) {
                ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
                              xferlen, DDP_THRESHOLD);
                return NULL;
        }

        gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
                     npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
                     gfp);
        if (!gl)
                return NULL;

        gl->pages = (struct page **)&gl->phys_addr[npages];
        gl->length = xferlen;
        gl->offset = sgoffset;
        gl->pages[0] = sgpage;

        sg = sg_next(sg);
        while (sg) {
                struct page *page = sg_page(sg);

                if (sgpage == page && sg->offset == sgoffset + sglen)
                        sglen += sg->length;
                else {
                        /* make sure the sgl fits ddp:
                         * each chunk has the same page size, and
                         * all of the middle pages are used completely
                         */
                        if ((j && sgoffset) ||
                            ((i != sgcnt - 1) &&
                             ((sglen + sgoffset) & ~PAGE_MASK)))
                                goto error_out;

                        j++;
                        /* gl->nelem is not set until the end of the loop,
                         * bound the page array by npages instead
                         */
                        if (j == npages || sg->offset)
                                goto error_out;
                        gl->pages[j] = page;
                        sglen = sg->length;
                        sgoffset = sg->offset;
                        sgpage = page;
                }
                i++;
                sg = sg_next(sg);
        }
        gl->nelem = ++j;

        if (ddp_gl_map(pdev, gl) < 0)
                goto error_out;

        return gl;

error_out:
        kfree(gl);
        return NULL;
}

/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 *
 * free a ddp page buffer list resulting from cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
                           struct pci_dev *pdev)
{
        ddp_gl_unmap(pdev, gl);
        kfree(gl);
}

/**
 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @tformat: tag format
 * @tagp: contains s/w tag initially, will be updated with ddp/hw tag
 * @gl: the page memory list
 * @gfp: allocation mode
 *
 * ddp setup for a given page buffer list and construct the ddp tag.
 * return 0 if success, < 0 otherwise.
 */
int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
                           struct cxgb3i_tag_format *tformat, u32 *tagp,
                           struct cxgb3i_gather_list *gl, gfp_t gfp)
{
        struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
        struct pagepod_hdr hdr;
        unsigned int npods;
        int idx = -1;
        int err = -ENOMEM;
        u32 sw_tag = *tagp;
        u32 tag;

        if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
                gl->length < DDP_THRESHOLD) {
                ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
                              page_idx, gl ? gl->length : 0, DDP_THRESHOLD);
                return -EINVAL;
        }

        npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;

        if (ddp->idx_last == ddp->nppods)
                idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, npods, gl);
        else {
                idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
                                              ddp->nppods, npods, gl);
                if (idx < 0 && ddp->idx_last >= npods) {
                        idx = ddp_find_unused_entries(ddp, 0,
                                min(ddp->idx_last + npods, ddp->nppods),
                                                      npods, gl);
                }
        }
        if (idx < 0) {
                ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
                              gl->length, gl->nelem, npods);
                return idx;
        }

        err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
        if (err < 0)
                goto unmark_entries;

        tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
        tag |= idx << PPOD_IDX_SHIFT;

        hdr.rsvd = 0;
        hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
        hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
        hdr.maxoffset = htonl(gl->length);
        hdr.pgoffset = htonl(gl->offset);

        err = set_ddp_map(ddp, &hdr, idx, npods, gl);
        if (err < 0)
                goto free_gl_skb;

        ddp->idx_last = idx;
        ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
                      gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
                      idx, npods);
        *tagp = tag;
        return 0;

free_gl_skb:
        ddp_free_gl_skb(ddp, idx, npods);
unmark_entries:
        ddp_unmark_entries(ddp, idx, npods);
        return err;
}

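/*
 * Typical usage (a sketch, not taken verbatim from the connection code):
 * build a gather list from the scsi command's sgl, reserve a tag before
 * sending the PDU, and release it on task completion:
 *
 *      gl = cxgb3i_ddp_make_gl(xferlen, sgl, sgcnt, pdev, gfp);
 *      if (gl) {
 *              if (cxgb3i_ddp_tag_reserve(tdev, tid, tformat,
 *                                         &tag, gl, gfp) < 0)
 *                      cxgb3i_ddp_release_gl(gl, pdev);
 *      }
 *      ...
 *      cxgb3i_ddp_tag_release(tdev, tag);  (also frees the gather list)
 */
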
/**
 * cxgb3i_ddp_tag_release - release a ddp tag
 * @tdev: t3cdev adapter
 * @tag: ddp tag
 *
 * ddp cleanup for a given ddp tag and release all the resources held
 */
void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
{
        struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
        u32 idx;

        if (!ddp) {
                ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
                return;
        }

        idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
        if (idx < ddp->nppods) {
                struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
                unsigned int npods;

                if (!gl || !gl->nelem) {
                        ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
                                      tag, idx, gl, gl ? gl->nelem : 0);
                        return;
                }
                npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
                ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
                              tag, idx, npods);
                clear_ddp_map(ddp, tag, idx, npods);
                ddp_unmark_entries(ddp, idx, npods);
                cxgb3i_ddp_release_gl(gl, ddp->pdev);
        } else
                ddp_log_error("ddp tag 0x%x, idx 0x%x >= max 0x%x.\n",
                              tag, idx, ddp->nppods);
}

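/*
 * setup_conn_pgidx - set the ddp page-size index of a connection
 *
 * Both the page-size index and the digest settings live in TCB word 31:
 * bits 28..31 carry the page-size index (mask 0xF0000000, set here) and
 * bits 24..27 the ulp submode/digest bits (mask 0x0F000000, set in
 * cxgb3i_setup_conn_digest() below).
 */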
static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
                            int reply)
{
        struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                        GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

        if (!skb)
                return -ENOMEM;

        /* set up the ddp page size */
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0xF0000000);
        req->val = cpu_to_be64(val << 28);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(tdev, skb);
        return 0;
}

/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 *
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
                                    int reply)
{
        return setup_conn_pgidx(tdev, tid, page_idx, reply);
}

/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 *
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
                                int reply, unsigned long pgsz)
{
        int pgidx = cxgb3i_ddp_find_page_index(pgsz);

        return setup_conn_pgidx(tdev, tid, pgidx, reply);
}

/**
 * cxgb3i_setup_conn_digest - setup conn. digest setting
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 *
 * set up the iscsi digest settings for a connection identified by tid
 */
int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
                             int hcrc, int dcrc, int reply)
{
        struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                        GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

        if (!skb)
                return -ENOMEM;

        /* set up ulp submode: header and data digest */
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0x0F000000);
        req->val = cpu_to_be64(val << 24);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(tdev, skb);
        return 0;
}

/**
 * cxgb3i_adapter_ddp_info - read the adapter's ddp information
 * @tdev: t3cdev adapter
 * @tformat: tag format
 * @txsz: max tx pdu payload size, filled in by this func.
 * @rxsz: max rx pdu payload size, filled in by this func.
 *
 * setup the tag format for a given iscsi entity
 */
int cxgb3i_adapter_ddp_info(struct t3cdev *tdev,
                            struct cxgb3i_tag_format *tformat,
                            unsigned int *txsz, unsigned int *rxsz)
{
        struct cxgb3i_ddp_info *ddp;

        if (!tformat)
                return -EINVAL;

        if (!tdev->ulp_iscsi)
                return -EINVAL;

        ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

        tformat->rsvd_bits = ddp->idx_bits;
        tformat->rsvd_shift = PPOD_IDX_SHIFT;
        tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;

        ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
                      tformat->sw_bits, tformat->rsvd_bits,
                      tformat->rsvd_shift, tformat->rsvd_mask);

        *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
                        ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
        *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
                        ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
        ddp_log_info("max payload size: %u/%u, %u/%u.\n",
                     *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
        return 0;
}

/**
 * cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resources
 * @tdev: t3cdev adapter
 *
 * release all the resources held by the ddp pagepod manager for a given
 * adapter if needed
 */
void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
{
        int i = 0;
        struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

        ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);

        if (ddp) {
                tdev->ulp_iscsi = NULL;
                while (i < ddp->nppods) {
                        struct cxgb3i_gather_list *gl = ddp->gl_map[i];
                        if (gl) {
                                int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
                                                >> PPOD_PAGES_SHIFT;
                                ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
                                                tdev, i, npods);
                                kfree(gl);
                                ddp_free_gl_skb(ddp, i, npods);
                                i += npods;
                        } else
                                i++;
                }
                cxgb3i_free_big_mem(ddp);
        }
}

/**
 * ddp_init - initialize the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 *
 * initialize the ddp pagepod manager for a given adapter
 */
static void ddp_init(struct t3cdev *tdev)
{
        struct cxgb3i_ddp_info *ddp;
        struct ulp_iscsi_info uinfo;
        unsigned int ppmax, bits;
        int i, err;

        if (tdev->ulp_iscsi) {
                ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
                                tdev, tdev->ulp_iscsi);
                return;
        }

        err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
        if (err < 0) {
                ddp_log_error("%s, failed to get iscsi param err=%d.\n",
                                 tdev->name, err);
                return;
        }

        ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
        bits = __ilog2_u32(ppmax) + 1;
        if (bits > PPOD_IDX_MAX_SIZE)
                bits = PPOD_IDX_MAX_SIZE;
        ppmax = (1 << (bits - 1)) - 1;

        ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
                                   ppmax *
                                        (sizeof(struct cxgb3i_gather_list *) +
                                        sizeof(struct sk_buff *)),
                                   GFP_KERNEL);
        if (!ddp) {
                ddp_log_warn("%s unable to alloc ddp %u, ddp disabled.\n",
                             tdev->name, ppmax);
                return;
        }
        ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
        ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
                                          ppmax *
                                          sizeof(struct cxgb3i_gather_list *));
        spin_lock_init(&ddp->map_lock);

        ddp->tdev = tdev;
        ddp->pdev = uinfo.pdev;
        ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
        ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
        ddp->llimit = uinfo.llimit;
        ddp->ulimit = uinfo.ulimit;
        ddp->nppods = ppmax;
        ddp->idx_last = ppmax;
        ddp->idx_bits = bits;
        ddp->idx_mask = (1 << bits) - 1;
        ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

        uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
        for (i = 0; i < DDP_PGIDX_MAX; i++)
                uinfo.pgsz_factor[i] = ddp_page_order[i];
        uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

        err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
        if (err < 0) {
                ddp_log_warn("%s unable to set iscsi param err=%d, "
                              "ddp disabled.\n", tdev->name, err);
                goto free_ddp_map;
        }

        tdev->ulp_iscsi = ddp;

        ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
                        " %u/%u.\n",
                        tdev, ppmax, ddp->idx_bits, ddp->idx_mask,
                        ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
                        ddp->max_rxsz, uinfo.max_rxsz);
        return;

free_ddp_map:
        cxgb3i_free_big_mem(ddp);
}

/**
 * cxgb3i_ddp_init - initialize ddp functions
 * @tdev: t3cdev adapter
 *
 * determine the ddp page index matching the host PAGE_SIZE, then set up
 * the adapter's ddp pagepod manager
 */
void cxgb3i_ddp_init(struct t3cdev *tdev)
{
        if (page_idx == DDP_PGIDX_MAX) {
                page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
                ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
                                PAGE_SIZE, page_idx);
        }
        ddp_init(tdev);
}