struct list_head list;
struct request *rq;
struct bio *bio;
+ struct bio *bidi_bio;
int err;
struct sg_io_v4 hdr;
struct sg_io_v4 __user *uhdr;
wake_up(&bd->wq_free);
}
-static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
+static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
- struct bsg_command *bc = NULL;
+ struct bsg_command *bc = ERR_PTR(-EINVAL);
spin_lock_irq(&bd->lock);
if (unlikely(!bc)) {
spin_lock_irq(&bd->lock);
bd->queued_cmds--;
+ bc = ERR_PTR(-ENOMEM);
goto out;
}
return ret;
}
-/*
- * get a new free command, blocking if needed and specified
- */
-static struct bsg_command *bsg_get_command(struct bsg_device *bd)
-{
- struct bsg_command *bc;
- int ret;
-
- do {
- bc = __bsg_alloc_command(bd);
- if (bc)
- break;
-
- ret = bsg_io_schedule(bd, TASK_INTERRUPTIBLE);
- if (ret) {
- bc = ERR_PTR(ret);
- break;
- }
-
- } while (1);
-
- return bc;
-}
-
static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
struct sg_io_v4 *hdr, int has_write_perm)
{
if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
hdr->request_len))
return -EFAULT;
- if (blk_verify_command(rq->cmd, has_write_perm))
+
+ if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+ if (blk_verify_command(rq->cmd, has_write_perm))
+ return -EPERM;
+ } else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
/*
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
{
+ int ret = 0;
+
if (hdr->guard != 'Q')
return -EINVAL;
if (hdr->request_len > BLK_MAX_CDB)
hdr->din_xfer_len > (q->max_sectors << 9))
return -EIO;
- /* not supported currently */
- if (hdr->protocol || hdr->subprotocol)
- return -EINVAL;
-
- /*
- * looks sane, if no data then it should be fine from our POV
- */
- if (!hdr->dout_xfer_len && !hdr->din_xfer_len)
- return 0;
-
- /* not supported currently */
- if (hdr->dout_xfer_len && hdr->din_xfer_len)
- return -EINVAL;
+ switch (hdr->protocol) {
+ case BSG_PROTOCOL_SCSI:
+ switch (hdr->subprotocol) {
+ case BSG_SUB_PROTOCOL_SCSI_CMD:
+ case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
*rw = hdr->dout_xfer_len ? WRITE : READ;
-
- return 0;
+ return ret;
}
/*
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
request_queue_t *q = bd->queue;
- struct request *rq;
+ struct request *rq, *next_rq = NULL;
int ret, rw = 0; /* shut up gcc */
unsigned int dxfer_len;
void *dxferp = NULL;
 * map scatter-gather elements separately and string them to request
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
+ if (!rq)
+ return ERR_PTR(-ENOMEM);
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
&bd->flags));
- if (ret) {
- blk_put_request(rq);
- return ERR_PTR(ret);
+ if (ret)
+ goto out;
+
+ if (rw == WRITE && hdr->din_xfer_len) {
+ if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ next_rq = blk_get_request(q, READ, GFP_KERNEL);
+ if (!next_rq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rq->next_rq = next_rq;
+
+ dxferp = (void*)(unsigned long)hdr->din_xferp;
+ ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+ if (ret)
+ goto out;
}
if (hdr->dout_xfer_len) {
if (dxfer_len) {
ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
- if (ret) {
- dprintk("failed map at %d\n", ret);
- blk_put_request(rq);
- rq = ERR_PTR(ret);
- }
+ if (ret)
+ goto out;
}
-
return rq;
+out:
+ blk_put_request(rq);
+ if (next_rq) {
+ blk_rq_unmap_user(next_rq->bio);
+ blk_put_request(next_rq);
+ }
+ return ERR_PTR(ret);
}
/*
*/
bc->rq = rq;
bc->bio = rq->bio;
+ if (rq->next_rq)
+ bc->bidi_bio = rq->next_rq->bio;
bc->hdr.duration = jiffies;
spin_lock_irq(&bd->lock);
list_add_tail(&bc->list, &bd->busy_list);
/*
* Get a finished command from the done list
*/
-static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
+static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
struct bsg_command *bc;
int ret;
if (bc)
break;
- ret = bsg_io_schedule(bd, state);
+ if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+ bc = ERR_PTR(-EAGAIN);
+ break;
+ }
+
+ ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
if (ret) {
- bc = ERR_PTR(ret);
+ bc = ERR_PTR(-ERESTARTSYS);
break;
}
} while (1);
return bc;
}
-static struct bsg_command *
-bsg_get_done_cmd(struct bsg_device *bd, const struct iovec *iov)
-{
- return __bsg_get_done_cmd(bd, TASK_INTERRUPTIBLE);
-}
-
-static struct bsg_command *
-bsg_get_done_cmd_nosignals(struct bsg_device *bd)
-{
- return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE);
-}
-
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
- struct bio *bio)
+ struct bio *bio, struct bio *bidi_bio)
{
int ret = 0;
ret = -EFAULT;
}
+ if (rq->next_rq) {
+ blk_rq_unmap_user(bidi_bio);
+ blk_put_request(rq->next_rq);
+ }
+
blk_rq_unmap_user(bio);
blk_put_request(rq);
*/
ret = 0;
do {
- bc = bsg_get_done_cmd_nosignals(bd);
-
- /*
- * we _must_ complete before restarting, because
- * bsg_release can't handle this failing.
- */
- if (PTR_ERR(bc) == -ERESTARTSYS)
- continue;
- if (IS_ERR(bc)) {
- ret = PTR_ERR(bc);
+ spin_lock_irq(&bd->lock);
+ if (!bd->queued_cmds) {
+ spin_unlock_irq(&bd->lock);
break;
}
+ spin_unlock_irq(&bd->lock);
- tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
+ bc = bsg_get_done_cmd(bd);
+ if (IS_ERR(bc))
+ break;
+
+ tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+ bc->bidi_bio);
if (!ret)
ret = tret;
return ret;
}
-typedef struct bsg_command *(*bsg_command_callback)(struct bsg_device *bd, const struct iovec *iov);
-
static ssize_t
-__bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
- struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read)
+__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
+ const struct iovec *iov, ssize_t *bytes_read)
{
struct bsg_command *bc;
int nr_commands, ret;
ret = 0;
nr_commands = count / sizeof(struct sg_io_v4);
while (nr_commands) {
- bc = get_bc(bd, iov);
+ bc = bsg_get_done_cmd(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
break;
* after completing the request. so do that here,
* bsg_complete_work() cannot do that for us
*/
- ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
+ ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+ bc->bidi_bio);
if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
ret = -EFAULT;
bsg_set_block(bd, file);
bytes_read = 0;
- ret = __bsg_read(buf, count, bsg_get_done_cmd,
- bd, NULL, &bytes_read);
+ ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
*ppos = bytes_read;
if (!bytes_read || (bytes_read && err_block_err(ret)))
while (nr_commands) {
request_queue_t *q = bd->queue;
- bc = bsg_get_command(bd);
- if (!bc)
- break;
+ bc = bsg_alloc_command(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
bc = NULL;
}
case SG_IO: {
struct request *rq;
- struct bio *bio;
+ struct bio *bio, *bidi_bio = NULL;
struct sg_io_v4 hdr;
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return PTR_ERR(rq);
bio = rq->bio;
+ if (rq->next_rq)
+ bidi_bio = rq->next_rq->bio;
blk_execute_rq(bd->queue, NULL, rq, 0);
- blk_complete_sgv4_hdr_rq(rq, &hdr, bio);
+ blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
if (copy_to_user(uarg, &hdr, sizeof(hdr)))
return -EFAULT;