 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
#include <linux/rhashtable.h>

#include "name_table.h"
#include "name_distr.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @link_cong: non-zero if owner must sleep because of link congestion
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @rcu: rcu struct for tipc_sock
 */
	struct list_head sock_list;
	struct list_head publications;
	struct sockaddr_tipc peer;
	struct rhash_head node;
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
static void tipc_sk_timeout(unsigned long data);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
			      size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;

static const struct rhashtable_params tsk_rht_params;
/*
 * Revised TIPC socket locking policy:
 *
 * Most socket operations take the standard socket lock when they start
 * and hold it until they finish (or until they need to sleep). Acquiring
 * this lock grants the owner exclusive access to the fields of the socket
 * data structures, with the exception of the backlog queue. A few socket
 * operations can be done without taking the socket lock because they only
 * read socket information that never changes during the life of the socket.
 *
 * Socket operations may acquire the lock for the associated TIPC port if they
 * need to perform an operation on the port. If any routine needs to acquire
 * both the socket lock and the port lock it must take the socket lock first
 * to avoid the risk of deadlock.
 *
 * The dispatcher handling incoming messages cannot grab the socket lock in
 * the standard fashion, since it is invoked at BH level and cannot block.
 * Instead, it checks to see if the socket lock is currently owned by someone,
 * and either handles the message itself or adds it to the socket's backlog
 * queue; in the latter case the queued message is processed once the process
 * owning the socket lock releases it.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
 * the problem of a blocked socket operation preventing any other operations
 * from occurring. However, applications must be careful if they have
 * multiple threads trying to send (or receive) on the same socket, as these
 * operations might interfere with each other. For example, doing a connect
 * and a receive at the same time might allow the receive to consume the
 * ACK message meant for the connect. While additional work could be done
 * to try and overcome this, it doesn't seem worthwhile at present.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
 * that another operation that must be performed in a non-blocking manner is
 * not delayed for very long because the lock has already been taken.
 *
 * NOTE: This code assumes that certain fields of a port/socket pair are
 * constant over its lifetime; such fields can be examined without taking
 * the socket lock and/or port lock, and do not need to be re-read even
 * after resuming processing after waiting. These fields include:
 *
 * - pointer to socket sk structure (aka tipc_sock structure)
 * - pointer to port structure
 */
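
/* Lock-ordering sketch (illustrative only; "port_lock" below is a
 * hypothetical stand-in, since port state is embedded in struct tipc_sock
 * in this file):
 *
 *	lock_sock(sk);			(socket lock always first)
 *	spin_lock_bh(&port_lock);	(then any port-level lock)
 *	...
 *	spin_unlock_bh(&port_lock);
 *	release_sock(sk);
 *
 * Reversing this order in any code path risks an ABBA deadlock against a
 * thread that follows the documented order.
 */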
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked >= tsk->snd_win;
}
/* tsk_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we fall back
 *   to message based flow control, incrementing the counter by one
 *   per message
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
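
/* Worked example (illustrative; assumes FLOWCTL_BLK_SZ is 1024 bytes):
 *
 *	tsk_adv_blocks(65536)  => 65536 / 1024 / 4 = 16 blocks advertised
 *	                          for a 64 KiB buffer, i.e. one block per
 *	                          4 KiB, absorbing the truesize(len)/len
 *	                          ratio of up to 4 noted above.
 *	tsk_inc(tsk, 4000)     => (4000 / 1024) + 1 = 4 blocks when the
 *	                          peer supports TIPC_BLOCK_FLOWCTL, else 1,
 *	                          i.e. one unit per message.
 */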
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 dnode, selector;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}

static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}
/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node;
	u32 peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && (peer_node == tn->own_addr))
		return true;

	if (!peer_node && (orig_node == tn->own_addr))
		return true;

	return false;
}
/* tipc_set_sk_state - set the sk_state of the socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}
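
/* Resulting transition table (sketch; derived from the checks above,
 * anything not listed is rejected):
 *
 *	OPEN        -> CONNECTING | ESTABLISHED
 *	CONNECTING  -> ESTABLISHED | DISCONNECTING
 *	ESTABLISHED -> DISCONNECTING
 */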
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);

	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);

	tn = net_generic(sock_net(sk), tipc_net_id);
	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
		      NAMED_H_SIZE, 0);

	/* Finish initializing socket data structures */
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}
	msg_set_origport(msg, tsk->portid);
	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
	sk->sk_backlog_rcv = tipc_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		if (!tipc_sk_type_connectionless(sk)) {
			tipc_node_remove_conn(net, dnode, tsk->portid);
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		}
	}
}
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);

	/* Reject any messages that accumulated in backlog queue */

	call_rcu(&tsk->rcu, tipc_sk_callback);

	return 0;
}
/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope > 0) ?
	      tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
	      tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	return res;
}
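
/* Userspace usage sketch (illustrative; the type/instance values are made
 * up): publish name {1000, 75} with cluster scope, then unbind everything:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.name.name = { .type = 1000, .instance = 75 },
 *	};
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));	(publish)
 *	bind(sd, NULL, 0);				(withdraw all names)
 *
 * A negative .scope would withdraw just that one name, per the note above.
 */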
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tn->own_addr;
	}

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->addr.name.domain = 0;

	return 0;
}
/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 *
 * Returns pollmask value
 *
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		if (!tsk->link_cong && !tsk_conn_cong(tsk))
			mask |= POLLOUT;
		/* fall through */
	case TIPC_CONNECTING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case TIPC_OPEN:
		if (tipc_sk_type_connectionless(sk) &&
		    (!skb_queue_empty(&sk->sk_receive_queue)))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case TIPC_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}

	return mask;
}
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dsz: total length of message data
 * @timeo: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dsz, long timeo)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head pktchain;
	struct iov_iter save = msg->msg_iter;
	uint mtu;
	int rc;

	if (!timeo && tsk->link_cong)
		return -ELINKCONG;

	msg_set_type(mhdr, TIPC_MCAST_MSG);
	msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(mhdr, 0);
	msg_set_destnode(mhdr, 0);
	msg_set_nametype(mhdr, seq->type);
	msg_set_namelower(mhdr, seq->lower);
	msg_set_nameupper(mhdr, seq->upper);
	msg_set_hdr_sz(mhdr, MCAST_H_SIZE);

	skb_queue_head_init(&pktchain);

new_mtu:
	mtu = tipc_bcast_get_mtu(net);
	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
	if (unlikely(rc < 0))
		return rc;

	do {
		rc = tipc_bcast_xmit(net, &pktchain);

		if (rc == -ELINKCONG) {
			tsk->link_cong = 1;
			rc = tipc_wait_for_sndmsg(sock, &timeo);
			if (!rc)
				continue;
		}
		__skb_queue_purge(&pktchain);
		if (rc == -EMSGSIZE) {
			msg->msg_iter = save;
			goto new_mtu;
		}
		break;
	} while (1);
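
/* Userspace counterpart (illustrative; the type and instance range are
 * made up): a multicast sendto() covering instances 0-99 of name type 1000:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 1000, .lower = 0, .upper = 99 },
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&a, sizeof(a));
 */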
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	struct tipc_msg *msg;
	struct tipc_plist dports;
	u32 portid;
	u32 scope = TIPC_CLUSTER_SCOPE;
	struct sk_buff_head tmpq;
	uint hsz;
	struct sk_buff *skb, *_skb;

	__skb_queue_head_init(&tmpq);
	tipc_plist_init(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		msg = buf_msg(skb);
		hsz = skb_headroom(skb) + msg_hdr_sz(msg);

		if (in_own_node(net, msg_orignode(msg)))
			scope = TIPC_NODE_SCOPE;

		/* Create destination port list and message clones: */
		tipc_nametbl_mc_translate(net,
					  msg_nametype(msg), msg_namelower(msg),
					  msg_nameupper(msg), scope, &dports);
		portid = tipc_plist_pop(&dports);
		for (; portid; portid = tipc_plist_pop(&dports)) {
			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
	}
	tipc_sk_rcv(net, inputq);
}
/**
 * tipc_sk_proto_rcv - receive a connection manager protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 */
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
			      struct sk_buff_head *xmitq)
{
	struct sock *sk = &tsk->sk;
	u32 onode = tsk_own_node(tsk);
	struct tipc_msg *hdr = buf_msg(skb);
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	int err = sock_error(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		return -EPIPE;

	if (signal_pending(current))
		return sock_intr_errno(*timeo_p);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
	finish_wait(sk_sleep(sk), &wait);
/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}
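
/* Userspace usage sketch (illustrative; the name values are made up):
 * an RDM/DGRAM send addressed by name rather than by port id, matching
 * the TIPC_ADDR_NAME branch handled below; domain 0 means cluster-wide
 * name lookup:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 75 },
 *		.addr.name.domain = 0,
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&a, sizeof(a));
 */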
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	u32 dnode, dport;
	struct sk_buff_head pktchain;
	bool is_connectionless = tipc_sk_type_connectionless(sk);
	struct sk_buff *skb;
	struct tipc_name_seq *seq;
	struct iov_iter save;
	u32 mtu;
	long timeo;
	int rc;

	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;
	if (unlikely(!dest)) {
		if (is_connectionless && tsk->peer.family == AF_TIPC)
			dest = &tsk->peer;
		else
			return -EDESTADDRREQ;
	} else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
		   dest->family != AF_TIPC) {
		return -EINVAL;
	}
	if (!is_connectionless) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;

		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}
	seq = &dest->addr.nameseq;
	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	if (dest->addrtype == TIPC_ADDR_MCAST) {
		return tipc_sendmcast(sock, seq, m, dsz, timeo);
	} else if (dest->addrtype == TIPC_ADDR_NAME) {
		u32 type = dest->addr.name.name.type;
		u32 inst = dest->addr.name.name.instance;
		u32 domain = dest->addr.name.domain;

		msg_set_type(mhdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
		msg_set_nametype(mhdr, type);
		msg_set_nameinst(mhdr, inst);
		msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(mhdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(mhdr, 0);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dest->addr.id.ref);
		msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
	}

	skb_queue_head_init(&pktchain);
	save = m->msg_iter;
new_mtu:
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
	if (rc < 0)
		return rc;

	do {
		skb = skb_peek(&pktchain);
		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
		rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
		if (likely(!rc)) {
			if (!is_connectionless)
				tipc_set_sk_state(sk, TIPC_CONNECTING);
			return dsz;
		}
		if (rc == -ELINKCONG) {
			tsk->link_cong = 1;
			rc = tipc_wait_for_sndmsg(sock, &timeo);
			if (!rc)
				continue;
		}
		__skb_queue_purge(&pktchain);
		if (rc == -EMSGSIZE) {
			m->msg_iter = save;
			goto new_mtu;
		}
		break;
	} while (1);

	return rc;
}
static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	int err = sock_error(sk);

	if (sk->sk_state == TIPC_DISCONNECTING)
		return -EPIPE;
	else if (!tipc_sk_connected(sk))
		return -ENOTCONN;

	if (signal_pending(current))
		return sock_intr_errno(*timeo_p);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	done = sk_wait_event(sk, timeo_p,
			     (!tsk->link_cong &&
			      !tsk_conn_cong(tsk)) ||
			     !tipc_sk_connected(sk));
	finish_wait(sk_sleep(sk), &wait);
/**
 * tipc_send_stream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_send_stream(sock, m, dsz);
	release_sock(sk);

	return ret;
}
static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head pktchain;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	u32 portid = tsk->portid;
	u32 dnode;
	long timeo;
	int rc = -EINVAL;
	uint mtu, send, sent = 0;
	struct iov_iter save;
	int hlen = MIN_H_SIZE;

	/* Handle implied connection establishment */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dsz);
		hlen = msg_hdr_sz(mhdr);
		if (dsz && (dsz == rc))
			tsk->snt_unacked = tsk_inc(tsk, dsz + hlen);
		return rc;
	}
	if (dsz > (uint)INT_MAX)
		return -EMSGSIZE;

	if (unlikely(!tipc_sk_connected(sk))) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else
			return -ENOTCONN;
	}

	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	if (!timeo && tsk->link_cong)
		return -ELINKCONG;

	dnode = tsk_peer_node(tsk);
	skb_queue_head_init(&pktchain);

next:
	save = m->msg_iter;
	mtu = tsk->max_pkt;
	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
	rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
	if (unlikely(rc < 0))
		return rc;

	do {
		if (likely(!tsk_conn_cong(tsk))) {
			rc = tipc_node_xmit(net, &pktchain, dnode, portid);
			if (likely(!rc)) {
				tsk->snt_unacked += tsk_inc(tsk, send + hlen);
				sent += send;
				if (sent == dsz)
					return dsz;
				goto next;
			}
			if (rc == -EMSGSIZE) {
				__skb_queue_purge(&pktchain);
				tsk->max_pkt = tipc_node_get_mtu(net, dnode,
								 portid);
				m->msg_iter = save;
				goto next;
			}
			if (rc != -ELINKCONG)
				break;

			tsk->link_cong = 1;
		}
		rc = tipc_wait_for_sndpkt(sock, &timeo);
	} while (!rc);

	__skb_queue_purge(&pktchain);
	return sent ? sent : rc;
}
/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_send_stream(sock, m, dsz);
}
/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}
/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);

	if (addr) {
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
		memset(&addr->addr, 0, sizeof(addr->addr));
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
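
/* Userspace usage sketch (illustrative): iterating the ancillary objects
 * delivered above from a recvmsg() result, where m is the struct msghdr
 * passed to recvmsg():
 *
 *	struct cmsghdr *cm;
 *
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *		if (cm->cmsg_level != SOL_TIPC)
 *			continue;
 *		if (cm->cmsg_type == TIPC_ERRINFO)
 *			...	(error code + returned data length, 2 x u32)
 *		else if (cm->cmsg_type == TIPC_RETDATA)
 *			...	(the returned message data itself)
 *		else if (cm->cmsg_type == TIPC_DESTNAME)
 *			...	(destination type/lower/upper, 3 x u32)
 *	}
 */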
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	long timeo = *timeop;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			err = -ENOTCONN;
			break;
		}
		timeo = schedule_timeout(timeo);
	}
	if (!skb_queue_empty(&sk->sk_receive_queue))
		break;
	err = sock_intr_errno(timeo);
	if (signal_pending(current))
		break;

	finish_wait(sk_sleep(sk), &wait);
/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
			int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	bool is_connectionless = tipc_sk_type_connectionless(sk);
	long timeo;
	unsigned int sz;
	u32 err;
	int res, hlen;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (!is_connectionless && unlikely(sk->sk_state == TIPC_OPEN)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	hlen = msg_hdr_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = tipc_sk_anc_data_recv(m, msg, tsk);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_msg(buf, hlen, m, sz);
	} else {
		if (is_connectionless || err == TIPC_CONN_SHUTDOWN ||
		    m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	if (unlikely(flags & MSG_PEEK))
		goto exit;

	if (likely(!is_connectionless)) {
		tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
		if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
			tipc_sk_send_ack(tsk);
	}
	tsk_advance_rx_queue(sk);
exit:
	release_sock(sk);
	return res;
}
/**
 * tipc_recv_stream - receive stream-oriented data
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available,
 * it will optionally wait for more; it never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
			    size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeo;
	unsigned int sz;
	int target;
	int sz_copied = 0;
	u32 err;
	int res = 0, hlen;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		res = -ENOTCONN;
		goto exit;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	hlen = msg_hdr_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = tipc_sk_anc_data_recv(m, msg, tsk);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		u32 offset = TIPC_SKB_CB(buf)->bytes_read;
		u32 needed;
		int sz_to_copy;

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = min(sz, needed);

		res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->bytes_read =
					offset + sz_to_copy;
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	if (unlikely(flags & MSG_PEEK))
		goto exit;

	tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
	if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
		tipc_sk_send_ack(tsk);
	tsk_advance_rx_queue(sk);

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}
/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}
static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}

/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
 * Returns true if everything ok, false otherwise
 */
static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			return true;
		}

		if (unlikely(!msg_isdata(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = EINVAL;
			return true;
		}

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;

	case TIPC_DISCONNECTING:
		break;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
			return true;
		break;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			/* Let timer expire on its own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
		}
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 *
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);

	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;

	return FLOWCTL_MSG_LIM;
}
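
/* Worked example (illustrative; assumes the default sk_rcvbuf of 2 MB and
 * importance levels LOW..CRITICAL encoded as 0..3): a connectionless
 * message gets a limit of 2 MB << 0..3 = 2/4/8/16 MB, matching the table
 * above, while a connected peer supporting TIPC_BLOCK_FLOWCTL gets
 * sk_rcvbuf as-is.
 */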
/**
 * filter_rcv - validate incoming message
 * @skb: pointer to message.
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 *
 * Returns true if message was added to socket receive queue, otherwise false
 */
static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
		       struct sk_buff_head *xmitq)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	unsigned int limit = rcvbuf_limit(sk, skb);
	int err = TIPC_OK;
	int usr = msg_user(hdr);

	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
		tipc_sk_proto_rcv(tsk, skb, xmitq);
		return false;
	}

	if (unlikely(usr == SOCK_WAKEUP)) {
		kfree_skb(skb);
		tsk->link_cong = 0;
		sk->sk_write_space(sk);
		return false;
	}

	/* Drop if illegal message type */
	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
		kfree_skb(skb);
		return false;
	}

	/* Reject if wrong message type for current socket state */
	if (tipc_sk_type_connectionless(sk)) {
		if (msg_connected(hdr)) {
			err = TIPC_ERR_NO_PORT;
			goto reject;
		}
	} else if (unlikely(!filter_connect(tsk, skb))) {
		err = TIPC_ERR_NO_PORT;
		goto reject;
	}

	/* Reject message if there isn't room to queue it */
	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
		err = TIPC_ERR_OVERLOAD;
		goto reject;
	}

	/* Enqueue message */
	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);

	sk->sk_data_ready(sk);
	return true;

reject:
	if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
		__skb_queue_tail(xmitq, skb);
	return false;
}
/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 *
 * Caller must hold socket lock
 *
 * Returns 0
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	unsigned int truesize = skb->truesize;
	struct sk_buff_head xmitq;
	u32 dnode, selector;

	__skb_queue_head_init(&xmitq);

	if (likely(filter_rcv(sk, skb, &xmitq))) {
		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
		return 0;
	}

	if (skb_queue_empty(&xmitq))
		return 0;

	/* Send response/rejected message */
	skb = __skb_dequeue(&xmitq);
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);

	return 0;
}
/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;

		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
		break;
	}
}
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct sk_buff_head xmitq;
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			while ((skb = __skb_dequeue(&xmitq))) {
				dnode = msg_destnode(buf_msg(skb));
				tipc_node_xmit_skb(net, skb, dnode, dport);
			}
			continue;
		}

		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;

	int err = sock_error(sk);

	if (signal_pending(current))
		return sock_intr_errno(*timeo_p);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	done = sk_wait_event(sk, timeo_p,
			     sk->sk_state != TIPC_CONNECTING);
	finish_wait(sk_sleep(sk), &wait);
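
/* Userspace usage sketch for the connect routine below (illustrative;
 * the name values are made up). SOCK_RDM/SOCK_DGRAM merely store the
 * address for later sends, while SOCK_SEQPACKET/SOCK_STREAM perform the
 * full SYN/ACK handshake:
 *
 *	struct sockaddr_tipc a = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 75 },
 *	};
 *	connect(sd, (struct sockaddr *)&a, sizeof(a));
 */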
/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	int previous;
	int res = 0;

	lock_sock(sk);

	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		if (dst->family == AF_UNSPEC) {
			memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		} else if (destlen != sizeof(struct sockaddr_tipc)) {
			res = -EINVAL;
		} else {
			memcpy(&tsk->peer, dest, destlen);
		}
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 * so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	previous = sk->sk_state;
	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered TIPC_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall through */
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
			goto exit;
		}
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case TIPC_ESTABLISHED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
	}

exit:
	release_sock(sk);
	return res;
}
/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
	release_sock(sk);

	return res;
}

static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @newsock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);
	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_send_stream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}
/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
	} else {
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
static void tipc_sk_timeout(unsigned long data)
{
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	u32 peer_port, peer_node;
	u32 own_node = tsk_own_node(tsk);

	bh_lock_sock(sk);
	if (!tipc_sk_connected(sk)) {
		bh_unlock_sock(sk);
		goto exit;
	}
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);

	if (tsk->probe_unacked) {
		if (!sock_owned_by_user(sk)) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
		} else {
			/* Try again later */
			sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
		}

		bh_unlock_sock(sk);
		goto exit;
	}

	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
			      INT_H_SIZE, 0, peer_node, own_node,
			      peer_port, tsk->portid, TIPC_OK);
	tsk->probe_unacked = true;
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
	bh_unlock_sock(sk);
	if (skb)
		tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
exit:
	sock_put(sk);
}
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	if (tipc_sk_connected(sk))
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				continue;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}
/* tipc_sk_reinit: set non-zero address in all existing sockets
 * when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;
	int i;

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
	rcu_read_unlock();
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}
static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

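/* The socket hash table is keyed on the 32-bit portid. Per the
 * parameters above, it never shrinks below 256 buckets, can grow to
 * roughly a million, and is resized back down automatically as sockets
 * are released.
 */
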
int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}

void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	if (ol < sizeof(value))
		return -EINVAL;
	res = get_user(value, (u32 __user *)ov);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		/* no need to set "res", since already 0 at this point */
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}

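/* Userspace sketch for the setter above (values illustrative only):
 * TIPC_IMPORTANCE takes one of the four TIPC importance levels, and
 * TIPC_CONN_TIMEOUT is expressed in milliseconds.
 *
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *	__u32 tmo = 10000;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */
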
/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}

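/* Matching userspace sketch for the getter (illustrative): every option
 * here returns a single u32, so the caller supplies a 4-byte receptacle.
 *
 *	__u32 depth;
 *	socklen_t len = sizeof(depth);
 *
 *	if (!getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len))
 *		printf("receive queue depth: %u\n", depth);
 */
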
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

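/* Userspace sketch for SIOCGETLINKNAME, the only ioctl supported here
 * (peer value illustrative): the caller fills in the peer node address
 * and bearer id, and on success the kernel writes the link name back
 * into the same structure.
 *
 *	struct tipc_sioc_ln_req req = {
 *		.peer = peer_node_addr,
 *		.bearer_id = 0,
 *	};
 *
 *	if (!ioctl(sd, SIOCGETLINKNAME, &req))
 *		printf("link: %s\n", req.linkname);
 */
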
/* Protocol switches for the various types of TIPC sockets */

static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_stream,
	.recvmsg	= tipc_recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sock *sk = &tsk->sk;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tipc_sk_connected(sk)) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}

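/* The dump above is resumable: cb->args[0] remembers the hash table
 * bucket and cb->args[1] the portid of the entry that did not fit into
 * the current skb, so the next netlink callback continues exactly where
 * this one left off.
 */
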
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent(),
			 * so setting prev_seq here will cause the consistency
			 * check to fail in the netlink callback handler,
			 * resulting in the last NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}