 * Generic datagram handling routines. These are generic for all
 * protocols. Possibly a generic IP version on top of these would
 * make sense. Not tonight however 8-).
 * This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 * NetROM layer all have identical poll code and mostly
 * identical recvmsg() code. So we share it here. The poll was
 * shared before but buried in udp.c so I moved it.
 * Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *
 *	Alan Cox	: NULL return from skb_peek_copy()
 *	Alan Cox	: Rewrote skb_read_datagram to avoid the
 *			  skb_peek_copy stuff.
 *	Alan Cox	: Added support for SOCK_SEQPACKET.
 *			  IPX can no longer use the SO_TYPE hack
 *			  but AX.25 now works right, and SPX is
 *	Alan Cox	: Fixed write poll of non IP protocol
 *	Florian La Roche: Changed for my new skbuff handling.
 *	Darryl Miles	: Fixed non-blocking SOCK_SEQPACKET.
 *	Linus Torvalds	: BSD semantic fixes.
 *	Alan Cox	: Datagram iovec handling
 *	Darryl Miles	: Fixed non-blocking SOCK_STREAM.
 *	Alan Cox	: POSIXisms
 *	Pete Wyckoff	: Unconnected accept() fix.
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
/* Is a socket 'connection oriented'? */
static inline int connection_based(struct sock *sk)
{
        return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
                                  void *key)
{
        unsigned long bits = (unsigned long)key;

        /* Avoid a wakeup if the event is not interesting for us */
        if (bits && !(bits & (POLLIN | POLLERR)))
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
                                const struct sk_buff *skb)
{
        DEFINE_WAIT_FUNC(wait, receiver_wake_function);

        prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

        error = sock_error(sk);

        if (sk->sk_receive_queue.prev != skb)

        /* Socket shut down? */
        if (sk->sk_shutdown & RCV_SHUTDOWN)

        /* Sequenced packets can come disconnected.
         * If so we report the problem
         */
        if (connection_based(sk) &&
            !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))

        if (signal_pending(current))

        *timeo_p = schedule_timeout(*timeo_p);

        finish_wait(sk_sleep(sk), &wait);

        error = sock_intr_errno(*timeo_p);

EXPORT_SYMBOL(__skb_wait_for_more_packets);
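/*
 * Illustrative sketch (not part of this file): roughly how a blocking
 * receive can be built from the two primitives above -- try to dequeue,
 * and if nothing is queued yet, sleep until the receive queue changes.
 * This is essentially the pattern __skb_recv_datagram() implements further
 * down; "example_blocking_recv" is a hypothetical name.
 */
#if 0
static struct sk_buff *example_blocking_recv(struct sock *sk, unsigned int flags,
                                             int *peeked, int *off, int *err)
{
        struct sk_buff *skb, *last;
        long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        do {
                /* Non-blocking attempt; sets *err to -EAGAIN when the queue is empty. */
                skb = __skb_try_recv_datagram(sk, flags, NULL, peeked, off,
                                              err, &last);
                if (skb)
                        return skb;
                if (*err != -EAGAIN)
                        return NULL;
                /* Sleep until a packet newer than "last" arrives, or timeout/signal. */
        } while (timeo &&
                 !__skb_wait_for_more_packets(sk, err, &timeo, last));

        return NULL;
}
#endif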
static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
        struct sk_buff *nskb;

        /* We have to unshare an skb before modifying it. */
        if (!skb_shared(skb))

        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return ERR_PTR(-ENOMEM);

        skb->prev->next = nskb;
        skb->next->prev = nskb;
        nskb->prev = skb->prev;
        nskb->next = skb->next;
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
                                          struct sk_buff_head *queue,
                                          unsigned int flags,
                                          void (*destructor)(struct sock *sk,
                                                             struct sk_buff *skb),
                                          int *peeked, int *off, int *err,
                                          struct sk_buff **last)
{
        skb_queue_walk(queue, skb) {
                if (flags & MSG_PEEK) {
                        if (_off >= skb->len && (skb->len || _off ||

                        skb = skb_set_peeked(skb);
                        if (unlikely(IS_ERR(skb))) {

                        refcount_inc(&skb->users);

                        __skb_unlink(skb, queue);
/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@flags: MSG_ flags
 *	@destructor: invoked under the receive lock on successful dequeue
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and AppleTalk layers. It also finally
 *	fixes the long-standing peek and read race for datagram sockets. If
 *	you alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if an skb is returned, so
 *	the caller needs to unlock the socket in that case (usually by
 *	calling skb_free_datagram). Returns NULL with @err set to
 *	-EAGAIN if no data was available or to some other error value if
 *	an error was detected.
 *
 *	* It does not lock the socket today; this function is free of race
 *	* conditions. That should significantly improve datagram socket
 *	* latencies at high load, when copying data to user space takes a
 *	* lot of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g; don't change it without having
 *	the standard around, please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                                        void (*destructor)(struct sock *sk,
                                                           struct sk_buff *skb),
                                        int *peeked, int *off, int *err,
                                        struct sk_buff **last)
{
        struct sk_buff_head *queue = &sk->sk_receive_queue;
        unsigned long cpu_flags;
        /* Caller is allowed not to check sk->sk_err before skb_recv_datagram() */
        int error = sock_error(sk);

                /* Again, only user level code calls this function, so nothing
                 * at interrupt level will suddenly eat the receive_queue.
                 *
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
                spin_lock_irqsave(&queue->lock, cpu_flags);
                skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
                                                peeked, off, &error, last);
                spin_unlock_irqrestore(&queue->lock, cpu_flags);

                if (!sk_can_busy_loop(sk))

                sk_busy_loop(sk, flags & MSG_DONTWAIT);
        } while (!skb_queue_empty(&sk->sk_receive_queue));

EXPORT_SYMBOL(__skb_try_recv_datagram);
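/*
 * Illustrative sketch (not part of this file): peeking at the next queued
 * datagram without dequeueing it. With MSG_PEEK the skb stays on
 * sk_receive_queue and only an extra reference is taken, so dropping that
 * reference afterwards (skb_free_datagram() does this) is all the cleanup
 * needed. "example_peek_next_len" is a hypothetical helper.
 */
#if 0
static int example_peek_next_len(struct sock *sk)
{
        struct sk_buff *skb, *last;
        int peeked, off = 0, err, len;

        skb = __skb_try_recv_datagram(sk, MSG_PEEK | MSG_DONTWAIT, NULL,
                                      &peeked, &off, &err, &last);
        if (!skb)
                return err;     /* -EAGAIN if the queue is simply empty */

        len = skb->len;
        skb_free_datagram(sk, skb);     /* drops only the peek reference */
        return len;
}
#endif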
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                    void (*destructor)(struct sock *sk,
                                                       struct sk_buff *skb),
                                    int *peeked, int *off, int *err)
{
        struct sk_buff *skb, *last;

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

                skb = __skb_try_recv_datagram(sk, flags, destructor, peeked,
                                              off, err, &last);

                 !__skb_wait_for_more_packets(sk, err, &timeo, last));

EXPORT_SYMBOL(__skb_recv_datagram);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
                                  int noblock, int *err)
{
        int peeked, off = 0;

        return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
                                   NULL, &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
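/*
 * Illustrative sketch (not part of this file): roughly how a datagram
 * protocol's recvmsg() uses skb_recv_datagram() together with
 * skb_copy_datagram_msg() and skb_free_datagram(). Truncation handling
 * follows the usual MSG_TRUNC convention; "example_recvmsg" is hypothetical.
 */
#if 0
static int example_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                           int noblock, int flags)
{
        struct sk_buff *skb;
        size_t copied;
        int err;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (copied > len) {
                copied = len;           /* user buffer smaller than the datagram */
                msg->msg_flags |= MSG_TRUNC;
        }

        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        skb_free_datagram(sk, skb);

        return err ? err : copied;
}
#endif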
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
        consume_skb(skb);
        sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
        if (!skb_unref(skb)) {
                sk_peek_offset_bwd(sk, len);

        slow = lock_sock_fast(sk);
        sk_peek_offset_bwd(sk, len);
        sk_mem_reclaim_partial(sk);
        unlock_sock_fast(sk, slow);

        /* skb is now orphaned, can be freed outside of locked section */

EXPORT_SYMBOL(__skb_free_datagram_locked);
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
                        struct sk_buff *skb, unsigned int flags,
                        void (*destructor)(struct sock *sk,
                                           struct sk_buff *skb))
{
        if (flags & MSG_PEEK) {

                spin_lock_bh(&sk_queue->lock);
                if (skb == skb_peek(sk_queue)) {
                        __skb_unlink(skb, sk_queue);
                        refcount_dec(&skb->users);

                spin_unlock_bh(&sk_queue->lock);

        atomic_inc(&sk->sk_drops);

EXPORT_SYMBOL(__sk_queue_drop_skb);
/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
        int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,

        sk_mem_reclaim_partial(sk);

EXPORT_SYMBOL(skb_kill_datagram);
/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
                           struct iov_iter *to, int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset, start_off = offset, n;
        struct sk_buff *frag_iter;

        trace_skb_copy_datagram_iovec(skb, len);

                n = copy_to_iter(skb->data + offset, copy, to);

                if ((len -= copy) == 0)

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        n = copy_page_to_iter(skb_frag_page(frag),
                                              frag->page_offset + offset -

        skb_walk_frags(skb, frag_iter) {

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (skb_copy_datagram_iter(frag_iter, offset - start,

                        if ((len -= copy) == 0)

        /* This is not really a user copy fault, but rather someone
         * gave us a bogus length on the skb. We should probably
         * print a warning here as it may indicate a kernel bug.
         */

        iov_iter_revert(to, offset - start_off);

        if (iov_iter_count(to))

EXPORT_SYMBOL(skb_copy_datagram_iter);
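/*
 * Illustrative note (not part of this file): the msghdr-based copy used by
 * recvmsg() implementations, skb_copy_datagram_msg(), is essentially a thin
 * wrapper around skb_copy_datagram_iter() that passes the iterator embedded
 * in the msghdr. A hypothetical equivalent:
 */
#if 0
static int example_copy_datagram_msg(const struct sk_buff *skb, int offset,
                                     struct msghdr *msg, int size)
{
        /* msg->msg_iter is the iov_iter describing the user's receive buffer */
        return skb_copy_datagram_iter(skb, offset, &msg->msg_iter, size);
}
#endif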
/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
                                struct iov_iter *from,
                                int len)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        struct sk_buff *frag_iter;

                if (copy_from_iter(skb->data + offset, copy, from) != copy)

                if ((len -= copy) == 0)

        /* Copy paged appendix. Hmm... why does this look so complicated? */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        copied = copy_page_from_iter(skb_frag_page(frag),
                                        frag->page_offset + offset - start,

        skb_walk_frags(skb, frag_iter) {

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (skb_copy_datagram_from_iter(frag_iter,

                        if ((len -= copy) == 0)

EXPORT_SYMBOL(skb_copy_datagram_from_iter);
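/*
 * Illustrative sketch (not part of this file): a typical sendmsg()-side use
 * of skb_copy_datagram_from_iter(): allocate a linear skb, reserve some
 * headroom, then fill the payload from the caller's iov_iter. The headroom
 * size is arbitrary here and "example_build_skb" is a hypothetical helper.
 */
#if 0
static struct sk_buff *example_build_skb(struct sock *sk, struct msghdr *msg,
                                         size_t len, int noblock)
{
        struct sk_buff *skb;
        int err;

        skb = sock_alloc_send_skb(sk, len + 128, noblock, &err);
        if (!skb)
                return ERR_PTR(err);

        skb_reserve(skb, 128);          /* room for protocol headers */
        skb_put(skb, len);              /* payload area to copy into */

        err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
        if (err) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }
        return skb;
}
#endif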
/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
        int len = iov_iter_count(from);
        int copy = min_t(int, skb_headlen(skb), len);

        /* copy up to skb headlen */
        if (skb_copy_datagram_from_iter(skb, 0, from, copy))

        while (iov_iter_count(from)) {
                struct page *pages[MAX_SKB_FRAGS];
                unsigned long truesize;

                if (frag == MAX_SKB_FRAGS)

                copied = iov_iter_get_pages(from, pages, ~0U,
                                            MAX_SKB_FRAGS - frag, &start);

                iov_iter_advance(from, copied);

                truesize = PAGE_ALIGN(copied + start);
                skb->data_len += copied;
                skb->truesize += truesize;
                refcount_add(truesize, &skb->sk->sk_wmem_alloc);
                        int size = min_t(int, copied, PAGE_SIZE - start);
                        skb_fill_page_desc(skb, frag++, pages[n], start, size);

EXPORT_SYMBOL(zerocopy_sg_from_iter);
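/*
 * Illustrative sketch (not part of this file): a zero-copy send fragment.
 * It assumes the skb is already owned by the sending socket (skb->sk set,
 * e.g. by sock_alloc_send_skb()) and that its linear area has been sized
 * with skb_put(); zerocopy_sg_from_iter() then fills the linear part and
 * attaches the rest of the user buffer as pinned page fragments.
 * "example_fill_zerocopy" is a hypothetical helper.
 */
#if 0
static int example_fill_zerocopy(struct sk_buff *skb, struct msghdr *msg)
{
        int err;

        err = zerocopy_sg_from_iter(skb, &msg->msg_iter);
        if (err)
                kfree_skb(skb); /* releases any pages already attached */
        return err;
}
#endif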
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      struct iov_iter *to, int len,
                                      __wsum *csump)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset, start_off = offset;
        struct sk_buff *frag_iter;

                n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);

                if ((len -= copy) == 0)

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON(start > offset + len);

                end = start + skb_frag_size(frag);
                if ((copy = end - offset) > 0) {
                        struct page *page = skb_frag_page(frag);
                        u8 *vaddr = kmap(page);

                        n = csum_and_copy_to_iter(vaddr + frag->page_offset +
                                                  offset - start, copy,

                        *csump = csum_block_add(*csump, csum2, pos);

        skb_walk_frags(skb, frag_iter) {

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (skb_copy_and_csum_datagram(frag_iter,

                        *csump = csum_block_add(*csump, csum2, pos);
                        if ((len -= copy) == 0)

        iov_iter_revert(to, offset - start_off);
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
        sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));

        if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
            !skb->csum_complete_sw)
                netdev_rx_csum_fault(skb->dev);

        if (!skb_shared(skb))
                skb->csum_valid = !sum;

EXPORT_SYMBOL(__skb_checksum_complete_head);
__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
        csum = skb_checksum(skb, 0, skb->len, 0);

        /* skb->csum holds pseudo checksum */
        sum = csum_fold(csum_add(skb->csum, csum));

        if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
            !skb->csum_complete_sw)
                netdev_rx_csum_fault(skb->dev);

        if (!skb_shared(skb)) {
                /* Save full packet checksum */
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum_complete_sw = 1;
                skb->csum_valid = !sum;

EXPORT_SYMBOL(__skb_checksum_complete);
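/*
 * Illustrative sketch (not part of this file): how a protocol might decide
 * whether a received packet's checksum can be trusted -- accept packets the
 * hardware already verified, otherwise fold and verify the full checksum in
 * software. "example_csum_ok" is a hypothetical helper expressing the same
 * pattern as the skb_checksum_complete() helper.
 */
#if 0
static bool example_csum_ok(struct sk_buff *skb)
{
        if (skb_csum_unnecessary(skb))
                return true;    /* already validated by the NIC */
        /* __skb_checksum_complete() returns 0 when the checksum is valid */
        return __skb_checksum_complete(skb) == 0;
}
#endif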
/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@hlen: hardware length
 *
 *	Caller _must_ check that skb will fit into this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
                                   int hlen, struct msghdr *msg)
{
        int chunk = skb->len - hlen;

        if (msg_data_left(msg) < chunk) {
                if (__skb_checksum_complete(skb))

                if (skb_copy_datagram_msg(skb, hlen, msg, chunk))

                csum = csum_partial(skb->data, hlen, skb->csum);
                if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,

                if (csum_fold(csum)) {
                        iov_iter_revert(&msg->msg_iter, chunk);

                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                        netdev_rx_csum_fault(skb->dev);

EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
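/*
 * Illustrative sketch (not part of this file): a UDP-style receive copy that
 * skips the software checksum when the hardware already verified it, and
 * otherwise verifies while copying; on a checksum failure the bogus packet
 * is dropped with skb_kill_datagram() and the caller is told to retry.
 * "example_copy_to_user" and its arguments are hypothetical.
 */
#if 0
static int example_copy_to_user(struct sock *sk, struct sk_buff *skb,
                                struct msghdr *msg, int off, int copied,
                                int flags)
{
        int err;

        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_msg(skb, off, msg, copied);
        else
                err = skb_copy_and_csum_datagram_msg(skb, off, msg);

        if (err == -EINVAL) {
                /* Checksum failure: drop the datagram, let the caller retry. */
                skb_kill_datagram(sk, skb, flags);
                err = -EAGAIN;
        }
        return err;
}
#endif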
/**
 *	datagram_poll - generic datagram poll
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets, provided the socket receive queue
 *	only ever holds data ready to receive.
 *
 *	Note: when you *don't* use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
                           poll_table *wait)
{
        struct sock *sk = sock->sk;

        sock_poll_wait(file, sk_sleep(sk), wait);
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)

        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        /* Connection-based sockets need to check for termination and startup */
        if (connection_based(sk)) {
                if (sk->sk_state == TCP_CLOSE)

                /* connection hasn't started yet? */
                if (sk->sk_state == TCP_SYN_SENT)

        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

EXPORT_SYMBOL(datagram_poll);
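/*
 * Illustrative sketch (not part of this file): where datagram_poll() usually
 * ends up -- plugged into a protocol family's struct proto_ops. The table
 * below is heavily trimmed; a real one ("example_dgram_ops" is hypothetical)
 * fills in every handler.
 */
#if 0
static const struct proto_ops example_dgram_ops = {
        .family         = PF_INET,
        .owner          = THIS_MODULE,
        .poll           = datagram_poll,
        .recvmsg        = sock_common_recvmsg,
        /* ... bind, connect, sendmsg and the remaining handlers omitted ... */
};
#endif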