drivers/scsi/cxgb3i/cxgb3i_ddp.c
/*
 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

/* from cxgb3 LLD */
#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"

#include "cxgb3i_ddp.h"

#define DRV_MODULE_NAME         "cxgb3i_ddp"
#define DRV_MODULE_VERSION      "1.0.0"
#define DRV_MODULE_RELDATE      "Dec. 1, 2008"

static char version[] =
        "Chelsio S3xx iSCSI DDP " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
MODULE_DESCRIPTION("cxgb3i ddp pagepod manager");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...)  printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...)  printk(KERN_INFO "cxgb3i_ddp: " fmt)

#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
        printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif

/*
 * iSCSI Direct Data Placement
 *
 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
 * pre-posted final destination host-memory buffers based on the Initiator
 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 *
 * The host memory address is programmed into h/w in the format of pagepod
 * entries.
 * The location of the pagepod entry is encoded into the ddp tag, which is
 * used as, or as the base of, the ITT/TTT.
 */
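
/*
 * For illustration, a ddp tag is formed by folding the pagepod index into
 * the reserved bits above the s/w tag (a minimal sketch using the macros
 * from cxgb3i_ddp.h; "sw_tag" stands for a caller-supplied s/w tag):
 *
 *      tag = cxgb3i_ddp_tag_base(tformat, sw_tag) | (idx << PPOD_IDX_SHIFT);
 *
 * and the pagepod index is recovered on release with:
 *
 *      idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
 */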

#define DDP_PGIDX_MAX           4
#define DDP_THRESHOLD           2048
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;

static LIST_HEAD(cxgb3i_ddp_list);
static DEFINE_RWLOCK(cxgb3i_ddp_rwlock);

/*
 * functions to program the pagepod in h/w
 */
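/*
 * ulp_mem_io_set_hdr() builds the ULP_MEM_WRITE work request in front of a
 * single pagepod: a firmware BYPASS WR, the destination address in ULP
 * memory in 32-byte units, a data length of PPOD_SIZE >> 5 32-byte units,
 * and (PPOD_SIZE >> 3) + 1 flits covering the command plus the pod data
 * (with the 64-byte PPOD_SIZE from cxgb3i_ddp.h, that is 2 units and
 * 9 flits).
 */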
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
        struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

        req->wr.wr_lo = 0;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
        req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
                                   V_ULPTX_CMD(ULP_MEM_WRITE));
        req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
                         V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}

static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
                       unsigned int idx, unsigned int npods,
                       struct cxgb3i_gather_list *gl)
{
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = ddp->gl_skb[idx];
                struct pagepod *ppod;
                int j, pidx;

                /* hold on to the skb until we clear the ddp mapping */
                skb_get(skb);

                ulp_mem_io_set_hdr(skb, pm_addr);
                ppod = (struct pagepod *)
                       (skb->head + sizeof(struct ulp_mem_io));
                /* copy only the pagepod header; the addr[] entries are
                 * filled in below */
                memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod_hdr));
                for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
                        ppod->addr[j] = pidx < gl->nelem ?
                                     cpu_to_be64(gl->phys_addr[pidx]) : 0UL;

                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(ddp->tdev, skb);
        }
        return 0;
}

static int clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int idx,
                         unsigned int npods)
{
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = ddp->gl_skb[idx];

                ddp->gl_skb[idx] = NULL;
                memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
                ulp_mem_io_set_hdr(skb, pm_addr);
                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(ddp->tdev, skb);
        }
        return 0;
}

static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
                                          int start, int max, int count,
                                          struct cxgb3i_gather_list *gl)
{
        /* signed indices: "max" may be negative when the pool is smaller
         * than "count", and an unsigned comparison would loop past the
         * end of gl_map */
        int i, j;

        spin_lock(&ddp->map_lock);
        for (i = start; i <= max;) {
                for (j = 0; j < count; j++) {
                        if (ddp->gl_map[i + j])
                                break;
                }
                if (j == count) {
                        for (j = 0; j < count; j++)
                                ddp->gl_map[i + j] = gl;
                        spin_unlock(&ddp->map_lock);
                        return i;
                }
                i += j + 1;
        }
        spin_unlock(&ddp->map_lock);
        return -EBUSY;
}

static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
                                      int start, int count)
{
        spin_lock(&ddp->map_lock);
        memset(&ddp->gl_map[start], 0,
               count * sizeof(struct cxgb3i_gather_list *));
        spin_unlock(&ddp->map_lock);
}

static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
                                   int idx, int count)
{
        int i;

        for (i = 0; i < count; i++, idx++)
                if (ddp->gl_skb[idx]) {
                        kfree_skb(ddp->gl_skb[idx]);
                        ddp->gl_skb[idx] = NULL;
                }
}

static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
                                   int count, gfp_t gfp)
{
        int i;

        for (i = 0; i < count; i++) {
                struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
                                                PPOD_SIZE, gfp);
                if (skb) {
                        ddp->gl_skb[idx + i] = skb;
                        skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
                } else {
                        ddp_free_gl_skb(ddp, idx, i);
                        return -ENOMEM;
                }
        }
        return 0;
}

/**
 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
 * @pgsz: page size
 * Returns the ddp page index; if no match is found, returns DDP_PGIDX_MAX.
 */
int cxgb3i_ddp_find_page_index(unsigned long pgsz)
{
        int i;

        for (i = 0; i < DDP_PGIDX_MAX; i++) {
                if (pgsz == (1UL << ddp_page_shift[i]))
                        return i;
        }
        ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
        return DDP_PGIDX_MAX;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_find_page_index);
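
/*
 * Example (a minimal sketch of a hypothetical caller): probe whether the
 * host page size is ddp-capable before attempting any ddp setup, much as
 * cxgb3i_ddp_init_module() below does for PAGE_SIZE:
 *
 *      if (cxgb3i_ddp_find_page_index(PAGE_SIZE) >= DDP_PGIDX_MAX)
 *              ... fall back to non-ddp I/O ...
 */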

static inline void ddp_gl_unmap(struct pci_dev *pdev,
                                struct cxgb3i_gather_list *gl)
{
        int i;

        for (i = 0; i < gl->nelem; i++)
                pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
}

static inline int ddp_gl_map(struct pci_dev *pdev,
                             struct cxgb3i_gather_list *gl)
{
        int i;

        for (i = 0; i < gl->nelem; i++) {
                gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
                                                PAGE_SIZE,
                                                PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
                        goto unmap;
        }

        return i;

unmap:
        if (i) {
                unsigned int nelem = gl->nelem;

                gl->nelem = i;
                ddp_gl_unmap(pdev, gl);
                gl->nelem = nelem;
        }
        return -ENOMEM;
}

/**
 * cxgb3i_ddp_make_gl - build ddp page buffer list
 * @xferlen: total buffer length
 * @sgl: page buffer scatter-gather list
 * @sgcnt: # of page buffers
 * @pdev: pci_dev, used for pci map
 * @gfp: allocation mode
 *
 * construct a ddp page buffer list from the scsi scatter-gather list.
 * coalesce buffers as much as possible, and obtain dma addresses for
 * each page.
 *
 * Returns the cxgb3i_gather_list constructed from the page buffers if the
 * memory can be used for ddp, NULL otherwise.
 */
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
                                              struct scatterlist *sgl,
                                              unsigned int sgcnt,
                                              struct pci_dev *pdev,
                                              gfp_t gfp)
{
        struct cxgb3i_gather_list *gl;
        struct scatterlist *sg = sgl;
        struct page *sgpage = sg_page(sg);
        unsigned int sglen = sg->length;
        unsigned int sgoffset = sg->offset;
        unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
                              PAGE_SHIFT;
        int i = 1, j = 0;

        if (xferlen < DDP_THRESHOLD) {
                ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
                              xferlen, DDP_THRESHOLD);
                return NULL;
        }

        gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
                     npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
                     gfp);
        if (!gl)
                return NULL;

        gl->pages = (struct page **)&gl->phys_addr[npages];
        gl->length = xferlen;
        gl->offset = sgoffset;
        gl->pages[0] = sgpage;

        sg = sg_next(sg);
        while (sg) {
                struct page *page = sg_page(sg);

                if (sgpage == page && sg->offset == sgoffset + sglen)
                        sglen += sg->length;
                else {
                        /* make sure the sgl is fit for ddp:
                         * each buffer starts on a page boundary, and
                         * all of the middle buffers use full pages
                         */
                        if ((j && sgoffset) ||
                            ((i != sgcnt - 1) &&
                             ((sglen + sgoffset) & ~PAGE_MASK)))
                                goto error_out;

                        j++;
                        /* bound against npages: gl->nelem is not set until
                         * the gather list is complete */
                        if (j == npages || sg->offset)
                                goto error_out;
                        gl->pages[j] = page;
                        sglen = sg->length;
                        sgoffset = sg->offset;
                        sgpage = page;
                }
                i++;
                sg = sg_next(sg);
        }
        gl->nelem = ++j;

        if (ddp_gl_map(pdev, gl) < 0)
                goto error_out;

        return gl;

error_out:
        kfree(gl);
        return NULL;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_make_gl);
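
/*
 * A minimal usage sketch (hypothetical caller; error handling and locking
 * elided). A transfer is set up by building a gather list, then reserving
 * a ddp tag for it; on reserve failure the gather list must be released
 * by hand:
 *
 *      struct cxgb3i_gather_list *gl;
 *      u32 tag = sw_tag;       (s/w tag supplied by the iscsi layer)
 *
 *      gl = cxgb3i_ddp_make_gl(xferlen, sgl, sgcnt, pdev, GFP_ATOMIC);
 *      if (gl) {
 *              if (cxgb3i_ddp_tag_reserve(tdev, tid, tformat, &tag,
 *                                         gl, GFP_ATOMIC) < 0)
 *                      cxgb3i_ddp_release_gl(gl, pdev);
 *      }
 *      ...
 *      cxgb3i_ddp_tag_release(tdev, tag);      (also frees gl on success)
 */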

/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 * free a ddp page buffer list built by cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
                           struct pci_dev *pdev)
{
        ddp_gl_unmap(pdev, gl);
        kfree(gl);
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_release_gl);

/**
 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @tformat: tag format
 * @tagp: the s/w tag; if ddp setup is successful, it will be updated with
 *        the ddp/hw tag
 * @gl: the page memory list
 * @gfp: allocation mode
 *
 * set up ddp for a given page buffer list and construct the ddp tag.
 * Returns 0 on success, < 0 otherwise.
 */
int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
                           struct cxgb3i_tag_format *tformat, u32 *tagp,
                           struct cxgb3i_gather_list *gl, gfp_t gfp)
{
        struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
        struct pagepod_hdr hdr;
        unsigned int npods;
        int idx = -1, idx_max;
        int err = -ENOMEM;
        u32 sw_tag = *tagp;
        u32 tag;

        if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
            gl->length < DDP_THRESHOLD) {
                ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n", page_idx,
                              gl ? gl->length : 0, DDP_THRESHOLD);
                return -EINVAL;
        }

        npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
        idx_max = ddp->nppods - npods + 1;

        if (ddp->idx_last == ddp->nppods)
                idx = ddp_find_unused_entries(ddp, 0, idx_max, npods, gl);
        else {
                idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
                                              idx_max, npods, gl);
                if (idx < 0 && ddp->idx_last >= npods)
                        idx = ddp_find_unused_entries(ddp, 0,
                                                      ddp->idx_last - npods + 1,
                                                      npods, gl);
        }
        if (idx < 0) {
                ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
                              gl->length, gl->nelem, npods);
                return idx;
        }

        err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
        if (err < 0)
                goto unmark_entries;

        tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
        tag |= idx << PPOD_IDX_SHIFT;

        hdr.rsvd = 0;
        hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
        hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
        hdr.maxoffset = htonl(gl->length);
        hdr.pgoffset = htonl(gl->offset);

        err = set_ddp_map(ddp, &hdr, idx, npods, gl);
        if (err < 0)
                goto free_gl_skb;

        ddp->idx_last = idx;
        ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
                      gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
                      idx, npods);
        *tagp = tag;
        return 0;

free_gl_skb:
        ddp_free_gl_skb(ddp, idx, npods);
unmark_entries:
        ddp_unmark_entries(ddp, idx, npods);
        return err;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_reserve);

/**
 * cxgb3i_ddp_tag_release - release a ddp tag
 * @tdev: t3cdev adapter
 * @tag: ddp tag
 * ddp cleanup for a given ddp tag and release all the resources held
 */
void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
{
        struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
        u32 idx;

        if (!ddp) {
                ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
                return;
        }

        idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
        if (idx < ddp->nppods) {
                struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
                unsigned int npods;

                if (!gl) {
                        ddp_log_error("release ddp 0x%x, idx 0x%x, gl NULL.\n",
                                      tag, idx);
                        return;
                }
                npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
                ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
                              tag, idx, npods);
                clear_ddp_map(ddp, idx, npods);
                ddp_unmark_entries(ddp, idx, npods);
                cxgb3i_ddp_release_gl(gl, ddp->pdev);
        } else
                ddp_log_error("ddp tag 0x%x, idx 0x%x >= max 0x%x.\n",
                              tag, idx, ddp->nppods);
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_release);

static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
                            int reply)
{
        struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                        GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

        if (!skb)
                return -ENOMEM;

        /* set up the ddp page size */
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0xF0000000);
        req->val = cpu_to_be64(val << 28);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(tdev, skb);
        return 0;
}

/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
                                    int reply)
{
        return setup_conn_pgidx(tdev, tid, page_idx, reply);
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_host_pagesize);

/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
                                int reply, unsigned long pgsz)
{
        int pgidx = cxgb3i_ddp_find_page_index(pgsz);

        return setup_conn_pgidx(tdev, tid, pgidx, reply);
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_pagesize);

/**
 * cxgb3i_setup_conn_digest - setup conn. digest setting
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 * set up the iscsi digest settings for a connection identified by tid
 */
int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
                             int hcrc, int dcrc, int reply)
{
        struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                        GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

        if (!skb)
                return -ENOMEM;

        /* set up the ulp submode */
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->cpu_idx = 0;
        req->word = htons(31);
        req->mask = cpu_to_be64(0x0F000000);
        req->val = cpu_to_be64(val << 24);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(tdev, skb);
        return 0;
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_digest);
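
/*
 * Note on the two CPL_SET_TCB_FIELD writes above: both setup_conn_pgidx()
 * and cxgb3i_setup_conn_digest() program TCB word 31 of the same
 * connection. The ddp page-size index occupies bits 31:28 (mask
 * 0xF0000000) and the hcrc/dcrc enable bits occupy bits 25:24 (within
 * mask 0x0F000000), so the two updates never touch each other's fields.
 */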

static int ddp_init(struct t3cdev *tdev)
{
        struct cxgb3i_ddp_info *ddp;
        struct ulp_iscsi_info uinfo;
        unsigned int ppmax, bits;
        int i, err;
        static int vers_printed;

        if (!vers_printed) {
                printk(KERN_INFO "%s", version);
                vers_printed = 1;
        }

        err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
        if (err < 0) {
                ddp_log_error("%s, failed to get iscsi param err=%d.\n",
                                 tdev->name, err);
                return err;
        }

        ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
        bits = __ilog2_u32(ppmax) + 1;
        if (bits > PPOD_IDX_MAX_SIZE)
                bits = PPOD_IDX_MAX_SIZE;
        ppmax = (1 << (bits - 1)) - 1;
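        /* e.g. (assuming PPOD_IDX_MAX_SIZE does not cap the result): a 2MB
         * pagepod region holds 32768 64-byte pods, __ilog2_u32() gives 15,
         * bits becomes 16, and ppmax is clamped to (1 << 15) - 1 = 32767,
         * so any pagepod index fits within the "bits"-wide index mask.
         */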

        ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
                                   ppmax *
                                        (sizeof(struct cxgb3i_gather_list *) +
                                        sizeof(struct sk_buff *)),
                                   GFP_KERNEL);
        if (!ddp) {
                ddp_log_warn("%s unable to alloc ddp %u, ddp disabled.\n",
                             tdev->name, ppmax);
                return 0;
        }
        ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
        ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
                                          ppmax *
                                          sizeof(struct cxgb3i_gather_list *));
        spin_lock_init(&ddp->map_lock);

        ddp->tdev = tdev;
        ddp->pdev = uinfo.pdev;
        ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
        ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
        ddp->llimit = uinfo.llimit;
        ddp->ulimit = uinfo.ulimit;
        ddp->nppods = ppmax;
        ddp->idx_last = ppmax;
        ddp->idx_bits = bits;
        ddp->idx_mask = (1 << bits) - 1;
        ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

        uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
        for (i = 0; i < DDP_PGIDX_MAX; i++)
                uinfo.pgsz_factor[i] = ddp_page_order[i];
        uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

        err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
        if (err < 0) {
                ddp_log_warn("%s unable to set iscsi param err=%d, "
                              "ddp disabled.\n", tdev->name, err);
                goto free_ddp_map;
        }

        tdev->ulp_iscsi = ddp;

        /* add to the list */
        write_lock(&cxgb3i_ddp_rwlock);
        list_add_tail(&ddp->list, &cxgb3i_ddp_list);
        write_unlock(&cxgb3i_ddp_rwlock);

        ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
                        "pkt %u/%u, %u/%u.\n",
                        ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
                        ddp->idx_mask, ddp->rsvd_tag_mask,
                        ddp->max_txsz, uinfo.max_txsz,
                        ddp->max_rxsz, uinfo.max_rxsz);
        return 0;

free_ddp_map:
        cxgb3i_free_big_mem(ddp);
        return err;
}

/**
 * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
 * @tdev: t3cdev adapter
 * @tformat: tag format
 * @txsz: max tx pdu payload size, filled in by this func.
 * @rxsz: max rx pdu payload size, filled in by this func.
 * initialize the ddp pagepod manager for a given adapter if needed and
 * setup the tag format for a given iscsi entity
 */
int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
                            struct cxgb3i_tag_format *tformat,
                            unsigned int *txsz, unsigned int *rxsz)
{
        struct cxgb3i_ddp_info *ddp;

        if (!tformat)
                return -EINVAL;

        if (!tdev->ulp_iscsi) {
                int err = ddp_init(tdev);
                if (err < 0)
                        return err;
        }
        ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

        tformat->rsvd_bits = ddp->idx_bits;
        tformat->rsvd_shift = PPOD_IDX_SHIFT;
        tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;

        ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
                      tformat->sw_bits, tformat->rsvd_bits,
                      tformat->rsvd_shift, tformat->rsvd_mask);

        *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
                        ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
        *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
                        ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
        ddp_log_info("max payload size: %u/%u, %u/%u.\n",
                     *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
        return 0;
}
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
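
/*
 * A minimal usage sketch (hypothetical caller; the "snic" fields are
 * illustrative, not part of this API):
 *
 *      err = cxgb3i_adapter_ddp_init(tdev, &snic->tag_format,
 *                                    &snic->tx_max_size,
 *                                    &snic->rx_max_size);
 *      if (err < 0)
 *              return err;
 */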

static void ddp_release(struct cxgb3i_ddp_info *ddp)
{
        int i = 0;
        struct t3cdev *tdev = ddp->tdev;

        tdev->ulp_iscsi = NULL;
        while (i < ddp->nppods) {
                struct cxgb3i_gather_list *gl = ddp->gl_map[i];
                if (gl) {
                        int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
                                     >> PPOD_PAGES_SHIFT;

                        kfree(gl);
                        ddp_free_gl_skb(ddp, i, npods);
                        /* all npods entries map the same gl; skip past them
                         * so the loop advances and gl is freed only once */
                        i += npods;
                } else
                        i++;
        }
        cxgb3i_free_big_mem(ddp);
}

/**
 * cxgb3i_adapter_ddp_cleanup - release the adapter's ddp resource
 * @tdev: t3cdev adapter
 * release all the resource held by the ddp pagepod manager for a given
 * adapter if needed
 */
void cxgb3i_adapter_ddp_cleanup(struct t3cdev *tdev)
{
        struct cxgb3i_ddp_info *ddp, *found = NULL;

        /* remove from the list; if the loop completes without a match, the
         * cursor does not point at a valid entry, so track the match
         * separately */
        write_lock(&cxgb3i_ddp_rwlock);
        list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
                if (ddp->tdev == tdev) {
                        list_del(&ddp->list);
                        found = ddp;
                        break;
                }
        }
        write_unlock(&cxgb3i_ddp_rwlock);

        if (found)
                ddp_release(found);
}
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_cleanup);

/**
 * cxgb3i_ddp_init_module - module init entry point
 * initialize any driver wide global data structures
 */
static int __init cxgb3i_ddp_init_module(void)
{
        page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
        ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
                     PAGE_SIZE, page_idx);
        return 0;
}

/**
 * cxgb3i_ddp_exit_module - module cleanup/exit entry point
 * go through the ddp list and release any resources held.
 */
static void __exit cxgb3i_ddp_exit_module(void)
{
        struct cxgb3i_ddp_info *ddp, *tmp;

        /* release all ddp managers, if any; ddp_release() frees the entry,
         * so a removal-safe iterator is required */
        write_lock(&cxgb3i_ddp_rwlock);
        list_for_each_entry_safe(ddp, tmp, &cxgb3i_ddp_list, list) {
                list_del(&ddp->list);
                ddp_release(ddp);
        }
        write_unlock(&cxgb3i_ddp_rwlock);
}

module_init(cxgb3i_ddp_init_module);
module_exit(cxgb3i_ddp_exit_module);