/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_IN].data_len, protection size
 * is stored in task->prot[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device *device = iser_task->ib_conn->device;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, regd_buf->reg.rkey,
		 (unsigned long long)regd_buf->reg.va);

	return 0;
}

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_OUT].data_len, protection size
 * is stored in task->prot[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device *device = iser_task->ib_conn->device;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_out,
					     ISER_DIR_OUT,
					     DMA_TO_DEVICE);
		if (err)
			return err;
	}

	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
	if (err) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}
	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
		hdr->write_va   = cpu_to_be64(regd_buf->reg.va + unsol_sz);
		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, regd_buf->reg.rkey,
			 (unsigned long long)regd_buf->reg.va, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr   = regd_buf->reg.va;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey   = regd_buf->reg.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *ib_conn,
				  struct iser_tx_desc *tx_desc)
{
	struct iser_device *device = ib_conn->device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
		tx_desc->tx_sg[0].lkey = device->mr->lkey;
		iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
	}
}

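/* Release the login buffer: unmap whichever halves were successfully
 * DMA-mapped, free the allocation and clear the handles so a repeated
 * call is a no-op.
 */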
static void iser_free_login_buf(struct iser_conn *ib_conn)
{
	if (!ib_conn->login_buf)
		return;

	if (ib_conn->login_req_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	if (ib_conn->login_resp_dma)
		ib_dma_unmap_single(ib_conn->device->ib_device,
				    ib_conn->login_resp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(ib_conn->login_buf);

	/* make sure we never redo any unmapping */
	ib_conn->login_req_dma = 0;
	ib_conn->login_resp_dma = 0;
	ib_conn->login_buf = NULL;
}

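/* Allocate and DMA-map the login staging buffer. The request and
 * response areas share a single allocation:
 *
 *   login_buf
 *   +----------------------------+--------------------+
 *   | login_req_buf              | login_resp_buf     |
 *   | ISCSI_DEF_MAX_RECV_SEG_LEN | ISER_RX_LOGIN_SIZE |
 *   +----------------------------+--------------------+
 *
 * The request half is mapped towards the device (DMA_TO_DEVICE), the
 * response half from it (DMA_FROM_DEVICE).
 */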
static int iser_alloc_login_buf(struct iser_conn *ib_conn)
{
	struct iser_device *device;
	int req_err, resp_err;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!ib_conn->login_buf)
		goto out_err;

	ib_conn->login_req_buf  = ib_conn->login_buf;
	ib_conn->login_resp_buf = ib_conn->login_buf +
				  ISCSI_DEF_MAX_RECV_SEG_LEN;

	ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
				(void *)ib_conn->login_resp_buf,
				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	req_err  = ib_dma_mapping_error(device->ib_device,
					ib_conn->login_req_dma);
	resp_err = ib_dma_mapping_error(device->ib_device,
					ib_conn->login_resp_dma);

	if (req_err || resp_err) {
		if (req_err)
			ib_conn->login_req_dma = 0;
		if (resp_err)
			ib_conn->login_resp_dma = 0;
		goto free_login_buf;
	}
	return 0;

free_login_buf:
	iser_free_login_buf(ib_conn);
out_err:
	iser_err("unable to alloc or map login buf\n");
	return -ENOMEM;
}

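/* Allocate the connection's RX descriptor ring and its RDMA registration
 * resources. On any failure the labels below unwind, in reverse order,
 * everything allocated so far.
 */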
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	struct iser_device *device = ib_conn->device;

	ib_conn->qp_max_recv_dtos = session->cmds_max;
	ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;

	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(ib_conn))
		goto alloc_login_buf_fail;

	ib_conn->rx_descs = kmalloc(session->cmds_max *
				    sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!ib_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = ib_conn->rx_descs;

	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr   = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey   = device->mr->lkey;
	}

	ib_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = ib_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(ib_conn);
alloc_login_buf_fail:
	device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}

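/* Tear down what iser_alloc_rx_descriptors() set up: the registration
 * resources, the RX descriptor ring and, last, the login buffer.
 */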
void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct iser_device *device = ib_conn->device;

	if (!ib_conn->rx_descs)
		goto free_login_buf;

	if (device->iser_free_rdma_reg_res)
		device->iser_free_rdma_reg_res(ib_conn);

	rx_desc = ib_conn->rx_descs;
	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	/* make sure we never redo any unmapping */
	ib_conn->rx_descs = NULL;

free_login_buf:
	iser_free_login_buf(ib_conn);
}

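/* Called when a login PDU is sent: once the exchange moves to full
 * feature phase, verify the expected buffers are posted and, for normal
 * (non-discovery) sessions, post the initial batch of RX buffers.
 */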
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *ib_conn = conn->dd_data;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer (for the last login
	 * response) and no posted send buffers left - they must have been
	 * consumed during previous login phases.
	 */
	WARN_ON(ib_conn->post_recv_buf_count != 1);
	WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	}
	iser_info("Normal session, posting batch of RX %d buffers\n",
		  ib_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(ib_conn, ib_conn->min_posted_rx))
		return -ENOMEM;

	return 0;
}

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *ib_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(ib_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->buf = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

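	/* T10-PI: the wire carries an 8-byte protection tuple per logical
	 * block, so the protection length is
	 * (transfer length / sector size) * 8 bytes.
	 */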
	if (scsi_prot_sg_count(sc)) {
		prot_buf->buf  = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		prot_buf->data_len = (data_buf->data_len >>
				      ilog2(sc->device->sector_size)) * 8;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(ib_conn, tx_desc);
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}

/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *ib_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = NULL;
	struct iser_regd_buf *regd_buf;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err = 0;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	iser_initialize_task_headers(task, tx_desc);

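	/* tx_sg[0] carries the iSER/iSCSI headers (set up by
	 * iser_initialize_task_headers()); tx_sg[1] points at the slice
	 * of the registered buffer this Data-Out PDU covers.
	 */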
	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr   = regd_buf->reg.va + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey   = regd_buf->reg.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(ib_conn, tx_desc);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

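/* Send a control/management PDU. A data segment is only legal on the
 * login task; its payload is staged through the pre-mapped login
 * request buffer.
 */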
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *ib_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	iser_create_send_desc(ib_conn, mdesc);

	device = ib_conn->device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			err = -EINVAL;
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device,
			ib_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		memcpy(ib_conn->login_req_buf, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device,
			ib_conn->login_req_dma, task->data_count,
			DMA_TO_DEVICE);

		tx_dsg->addr   = ib_conn->login_req_dma;
		tx_dsg->length = task->data_count;
		tx_dsg->lkey   = device->mr->lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(ib_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(ib_conn, mdesc);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

/**
 * iser_rcv_completion - recv DTO completion
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
			 unsigned long rx_xfer_len,
			 struct iser_conn *ib_conn)
{
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding, count, err;

	/* differentiate between login and all other PDUs */
	if ((char *)rx_desc == ib_conn->login_resp_buf) {
		rx_dma = ib_conn->login_resp_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
	} else {
		rx_dma = rx_desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
				   rx_buflen, DMA_FROM_DEVICE);

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

	iscsi_iser_recv(ib_conn->iscsi_conn, hdr, rx_desc->data,
			rx_xfer_len - ISER_HEADERS_LEN);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
				      rx_buflen, DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the *
	 * task eliminates the need to worry on tasks which are completed in *
	 * parallel to the execution of iser_conn_term. So the code that waits *
	 * for the posted rx bufs refcount to become zero handles everything */
	ib_conn->post_recv_buf_count--;

	if (rx_dma == ib_conn->login_resp_dma)
		return;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) {
		count = min(ib_conn->qp_max_recv_dtos - outstanding,
			    ib_conn->min_posted_rx);
		err = iser_post_recvm(ib_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}

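/* Handle a send completion: Data-Out descriptors are transient and are
 * unmapped and returned to the cache here; control descriptors whose
 * task used the reserved ITT drop their task reference.
 */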
void iser_snd_completion(struct iser_tx_desc *tx_desc,
			 struct iser_conn *ib_conn)
{
	struct iscsi_task *task;
	struct iser_device *device = ib_conn->device;

	if (tx_desc->type == ISCSI_TX_DATAOUT) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		kmem_cache_free(ig.desc_cache, tx_desc);
		tx_desc = NULL;
	}

	atomic_dec(&ib_conn->post_send_buf_count);

	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
		task = (void *) ((long)(void *)tx_desc -
				 sizeof(struct iscsi_task));
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
	}
}

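/* Reset per-task RDMA state before command submission. */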
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
	       sizeof(struct iser_regd_buf));
	memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
	       sizeof(struct iser_regd_buf));
}

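/* Undo everything the prepare_{read,write}_cmd path set up for a task:
 * copy back and free bounce buffers used for unaligned scatterlists,
 * unregister the RDMA memory and unmap the data and protection buffers.
 */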
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	struct iser_device *device = iser_task->ib_conn->device;
	int is_rdma_data_aligned = 1;
	int is_rdma_prot_aligned = 1;
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	/* if we were reading, copy back to unaligned sglist,
	 * anyway dma_unmap and free the copy
	 */
	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_IN],
						&iser_task->data_copy[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_OUT],
						&iser_task->data_copy[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_IN],
						&iser_task->prot_copy[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_OUT],
						&iser_task->prot_copy[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->dir[ISER_DIR_IN]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_IN]);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN]);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_OUT]);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT]);
	}
}