Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
[sfrench/cifs-2.6.git] / drivers / vhost / vsock.c
index f249622ef11bf929875e3de45e05f546ad9a0198..938aefbc75ecc24cae2637ebf9091d275e98b98c 100644 (file)
@@ -114,7 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                size_t nbytes;
                size_t iov_len, payload_len;
                int head;
-               bool restore_flag = false;
+               u32 flags_to_restore = 0;
 
                spin_lock_bh(&vsock->send_pkt_list_lock);
                if (list_empty(&vsock->send_pkt_list)) {
@@ -178,16 +178,21 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                         * small rx buffers, headers of packets in rx queue are
                         * created dynamically and are initialized with header
                         * of current packet(except length). But in case of
-                        * SOCK_SEQPACKET, we also must clear record delimeter
-                        * bit(VIRTIO_VSOCK_SEQ_EOR). Otherwise, instead of one
-                        * packet with delimeter(which marks end of record),
-                        * there will be sequence of packets with delimeter
-                        * bit set. After initialized header will be copied to
-                        * rx buffer, this bit will be restored.
+                        * SOCK_SEQPACKET, we also must clear message delimiter
+                        * bit (VIRTIO_VSOCK_SEQ_EOM) and MSG_EOR bit
+                        * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise,
+                        * there will be a sequence of packets with these
+                        * bits set. After the initialized header has been
+                        * copied to the rx buffer, these bits will be restored.
                         */
-                       if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
-                               pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
-                               restore_flag = true;
+                       if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
+                               pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+                               flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+
+                               if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+                                       pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+                                       flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+                               }
                        }
                }
 
@@ -224,8 +229,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                 * to send it with the next available buffer.
                 */
                if (pkt->off < pkt->len) {
-                       if (restore_flag)
-                               pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+                       pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
 
                        /* We are queueing the same virtio_vsock_pkt to handle
                         * the remaining bytes, and we want to deliver it