net/iucv/af_iucv.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  IUCV protocol stack for Linux on zSeries
4  *
5  *  Copyright IBM Corp. 2006, 2009
6  *
7  *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
8  *              Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
9  *  PM functions:
10  *              Ursula Braun <ursula.braun@de.ibm.com>
11  */
12
13 #define KMSG_COMPONENT "af_iucv"
14 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/types.h>
19 #include <linux/list.h>
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/sched/signal.h>
23 #include <linux/slab.h>
24 #include <linux/skbuff.h>
25 #include <linux/init.h>
26 #include <linux/poll.h>
27 #include <linux/security.h>
28 #include <net/sock.h>
29 #include <asm/ebcdic.h>
30 #include <asm/cpcmd.h>
31 #include <linux/kmod.h>
32
33 #include <net/iucv/af_iucv.h>
34
35 #define VERSION "1.2"
36
37 static char iucv_userid[80];
38
39 static const struct proto_ops iucv_sock_ops;
40
41 static struct proto iucv_proto = {
42         .name           = "AF_IUCV",
43         .owner          = THIS_MODULE,
44         .obj_size       = sizeof(struct iucv_sock),
45 };
46
47 static struct iucv_interface *pr_iucv;
48
49 /* special AF_IUCV IPRM messages */
50 static const u8 iprm_shutdown[8] =
51         {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
52
53 #define TRGCLS_SIZE     FIELD_SIZEOF(struct iucv_message, class)
54
55 #define __iucv_sock_wait(sk, condition, timeo, ret)                     \
56 do {                                                                    \
57         DEFINE_WAIT(__wait);                                            \
58         long __timeo = timeo;                                           \
59         ret = 0;                                                        \
60         prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
61         while (!(condition)) {                                          \
62                 if (!__timeo) {                                         \
63                         ret = -EAGAIN;                                  \
64                         break;                                          \
65                 }                                                       \
66                 if (signal_pending(current)) {                          \
67                         ret = sock_intr_errno(__timeo);                 \
68                         break;                                          \
69                 }                                                       \
70                 release_sock(sk);                                       \
71                 __timeo = schedule_timeout(__timeo);                    \
72                 lock_sock(sk);                                          \
73                 ret = sock_error(sk);                                   \
74                 if (ret)                                                \
75                         break;                                          \
76         }                                                               \
77         finish_wait(sk_sleep(sk), &__wait);                             \
78 } while (0)
79
80 #define iucv_sock_wait(sk, condition, timeo)                            \
81 ({                                                                      \
82         int __ret = 0;                                                  \
83         if (!(condition))                                               \
84                 __iucv_sock_wait(sk, condition, timeo, __ret);          \
85         __ret;                                                          \
86 })
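/*
 * Usage: iucv_sock_wait() returns 0 as soon as @condition holds, -EAGAIN
 * when the timeout expires, sock_intr_errno() when a signal is pending, or
 * a pending socket error.  The socket lock is dropped while sleeping and
 * re-taken before the condition is re-evaluated.
 */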
87
88 static void iucv_sock_kill(struct sock *sk);
89 static void iucv_sock_close(struct sock *sk);
90 static void iucv_sever_path(struct sock *, int);
91
92 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
93         struct packet_type *pt, struct net_device *orig_dev);
94 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
95                    struct sk_buff *skb, u8 flags);
96 static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
97
98 /* Callback functions */
99 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
100 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
101 static void iucv_callback_connack(struct iucv_path *, u8 *);
102 static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
103 static void iucv_callback_connrej(struct iucv_path *, u8 *);
104 static void iucv_callback_shutdown(struct iucv_path *, u8 *);
105
106 static struct iucv_sock_list iucv_sk_list = {
107         .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
108         .autobind_name = ATOMIC_INIT(0)
109 };
110
111 static struct iucv_handler af_iucv_handler = {
112         .path_pending     = iucv_callback_connreq,
113         .path_complete    = iucv_callback_connack,
114         .path_severed     = iucv_callback_connrej,
115         .message_pending  = iucv_callback_rx,
116         .message_complete = iucv_callback_txdone,
117         .path_quiesced    = iucv_callback_shutdown,
118 };
119
120 static inline void high_nmcpy(unsigned char *dst, char *src)
121 {
122        memcpy(dst, src, 8);
123 }
124
125 static inline void low_nmcpy(unsigned char *dst, char *src)
126 {
127        memcpy(&dst[8], src, 8);
128 }
129
130 static int afiucv_pm_prepare(struct device *dev)
131 {
132 #ifdef CONFIG_PM_DEBUG
133         printk(KERN_WARNING "afiucv_pm_prepare\n");
134 #endif
135         return 0;
136 }
137
138 static void afiucv_pm_complete(struct device *dev)
139 {
140 #ifdef CONFIG_PM_DEBUG
141         printk(KERN_WARNING "afiucv_pm_complete\n");
142 #endif
143 }
144
145 /**
146  * afiucv_pm_freeze() - Freeze PM callback
147  * @dev:        AFIUCV dummy device
148  *
149  *  Sever all established IUCV communication paths
150  */
151 static int afiucv_pm_freeze(struct device *dev)
152 {
153         struct iucv_sock *iucv;
154         struct sock *sk;
155
156 #ifdef CONFIG_PM_DEBUG
157         printk(KERN_WARNING "afiucv_pm_freeze\n");
158 #endif
159         read_lock(&iucv_sk_list.lock);
160         sk_for_each(sk, &iucv_sk_list.head) {
161                 iucv = iucv_sk(sk);
162                 switch (sk->sk_state) {
163                 case IUCV_DISCONN:
164                 case IUCV_CLOSING:
165                 case IUCV_CONNECTED:
166                         iucv_sever_path(sk, 0);
167                         break;
168                 case IUCV_OPEN:
169                 case IUCV_BOUND:
170                 case IUCV_LISTEN:
171                 case IUCV_CLOSED:
172                 default:
173                         break;
174                 }
175                 skb_queue_purge(&iucv->send_skb_q);
176                 skb_queue_purge(&iucv->backlog_skb_q);
177         }
178         read_unlock(&iucv_sk_list.lock);
179         return 0;
180 }
181
182 /**
183  * afiucv_pm_restore_thaw() - Thaw and restore PM callback
184  * @dev:        AFIUCV dummy device
185  *
186  *  Socket clean-up after freeze
187  */
188 static int afiucv_pm_restore_thaw(struct device *dev)
189 {
190         struct sock *sk;
191
192 #ifdef CONFIG_PM_DEBUG
193         printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
194 #endif
195         read_lock(&iucv_sk_list.lock);
196         sk_for_each(sk, &iucv_sk_list.head) {
197                 switch (sk->sk_state) {
198                 case IUCV_CONNECTED:
199                         sk->sk_err = EPIPE;
200                         sk->sk_state = IUCV_DISCONN;
201                         sk->sk_state_change(sk);
202                         break;
203                 case IUCV_DISCONN:
204                 case IUCV_CLOSING:
205                 case IUCV_LISTEN:
206                 case IUCV_BOUND:
207                 case IUCV_OPEN:
208                 default:
209                         break;
210                 }
211         }
212         read_unlock(&iucv_sk_list.lock);
213         return 0;
214 }
215
216 static const struct dev_pm_ops afiucv_pm_ops = {
217         .prepare = afiucv_pm_prepare,
218         .complete = afiucv_pm_complete,
219         .freeze = afiucv_pm_freeze,
220         .thaw = afiucv_pm_restore_thaw,
221         .restore = afiucv_pm_restore_thaw,
222 };
223
224 static struct device_driver af_iucv_driver = {
225         .owner = THIS_MODULE,
226         .name = "afiucv",
227         .bus  = NULL,
228         .pm   = &afiucv_pm_ops,
229 };
230
231 /* dummy device used as trigger for PM functions */
232 static struct device *af_iucv_dev;
233
234 /**
235  * iucv_msg_length() - Returns the length of an iucv message.
236  * @msg:        Pointer to struct iucv_message, MUST NOT be NULL
237  *
238  * The function returns the length of the specified iucv message @msg, i.e. the
239  * amount of data stored either in a buffer or in the parameter list (PRMDATA).
240  *
241  * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
242  * data:
243  *      PRMDATA[0..6]   socket data (max 7 bytes);
244  *      PRMDATA[7]      socket data length value (len is 0xff - PRMDATA[7])
245  *
246  * The socket data length is computed by subtracting the socket data length
247  * value from 0xFF.
248  * If the socket data length is greater than 7, PRMDATA can be used for
249  * special notifications (see iucv_sock_shutdown); in that case the
250  * function returns 8.
251  *
252  * Use this function to allocate socket buffers to store iucv message data.
253  */
254 static inline size_t iucv_msg_length(struct iucv_message *msg)
255 {
256         size_t datalen;
257
258         if (msg->flags & IUCV_IPRMDATA) {
259                 datalen = 0xff - msg->rmmsg[7];
260                 return (datalen < 8) ? datalen : 8;
261         }
262         return msg->length;
263 }
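/*
 * Example: an IPRM message with msg->rmmsg[7] == 0xf9 carries
 * 0xff - 0xf9 = 6 bytes of socket data.  The iprm_shutdown pattern above
 * ends in 0x01, which yields 0xfe and therefore a return value of 8.
 */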
264
265 /**
266  * iucv_sock_in_state() - check for specific states
267  * @sk:         sock structure
268  * @state:      first iucv sk state
269  * @state2:     second iucv sk state
270  *
271  * Returns true if the socket is in either the first or the second state.
272  */
273 static int iucv_sock_in_state(struct sock *sk, int state, int state2)
274 {
275         return (sk->sk_state == state || sk->sk_state == state2);
276 }
277
278 /**
279  * iucv_below_msglim() - function to check if messages can be sent
280  * @sk:         sock structure
281  *
282  * Returns true if the send queue length is lower than the message limit.
283  * Always returns true if the socket is not connected (no iucv path for
284  * checking the message limit).
285  */
286 static inline int iucv_below_msglim(struct sock *sk)
287 {
288         struct iucv_sock *iucv = iucv_sk(sk);
289
290         if (sk->sk_state != IUCV_CONNECTED)
291                 return 1;
292         if (iucv->transport == AF_IUCV_TRANS_IUCV)
293                 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
294         else
295                 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
296                         (atomic_read(&iucv->pendings) <= 0));
297 }
298
299 /**
300  * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
301  */
302 static void iucv_sock_wake_msglim(struct sock *sk)
303 {
304         struct socket_wq *wq;
305
306         rcu_read_lock();
307         wq = rcu_dereference(sk->sk_wq);
308         if (skwq_has_sleeper(wq))
309                 wake_up_interruptible_all(&wq->wait);
310         sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
311         rcu_read_unlock();
312 }
313
314 /**
315  * afiucv_hs_send() - send a message through HiperSockets transport
316  */
317 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
318                    struct sk_buff *skb, u8 flags)
319 {
320         struct iucv_sock *iucv = iucv_sk(sock);
321         struct af_iucv_trans_hdr *phs_hdr;
322         struct sk_buff *nskb;
323         int err, confirm_recv = 0;
324
325         phs_hdr = skb_push(skb, sizeof(*phs_hdr));
326         memset(phs_hdr, 0, sizeof(*phs_hdr));
327         skb_reset_network_header(skb);
328
329         phs_hdr->magic = ETH_P_AF_IUCV;
330         phs_hdr->version = 1;
331         phs_hdr->flags = flags;
332         if (flags == AF_IUCV_FLAG_SYN)
333                 phs_hdr->window = iucv->msglimit;
334         else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
335                 confirm_recv = atomic_read(&iucv->msg_recv);
336                 phs_hdr->window = confirm_recv;
337                 if (confirm_recv)
338                         phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
339         }
340         memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
341         memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
342         memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
343         memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
344         ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
345         ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
346         ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
347         ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
348         if (imsg)
349                 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
350
351         skb->dev = iucv->hs_dev;
352         if (!skb->dev) {
353                 err = -ENODEV;
354                 goto err_free;
355         }
356
357         dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);
358
359         if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
360                 err = -ENETDOWN;
361                 goto err_free;
362         }
363         if (skb->len > skb->dev->mtu) {
364                 if (sock->sk_type == SOCK_SEQPACKET) {
365                         err = -EMSGSIZE;
366                         goto err_free;
367                 }
368                 skb_trim(skb, skb->dev->mtu);
369         }
370         skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
371
372         __skb_header_release(skb);
373         nskb = skb_clone(skb, GFP_ATOMIC);
374         if (!nskb) {
375                 err = -ENOMEM;
376                 goto err_free;
377         }
378
379         skb_queue_tail(&iucv->send_skb_q, nskb);
380         err = dev_queue_xmit(skb);
381         if (net_xmit_eval(err)) {
382                 skb_unlink(nskb, &iucv->send_skb_q);
383                 kfree_skb(nskb);
384         } else {
385                 atomic_sub(confirm_recv, &iucv->msg_recv);
386                 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
387         }
388         return net_xmit_eval(err);
389
390 err_free:
391         kfree_skb(skb);
392         return err;
393 }
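/*
 * Note: on successful transmission a clone of the skb stays queued on
 * send_skb_q; it is expected to be removed later via the transmit
 * notification path (sk_txnotify / afiucv_hs_callback_txnotify).  Local
 * failures return a negative errno, otherwise the net_xmit_eval() result
 * of dev_queue_xmit() is returned.
 */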
394
395 static struct sock *__iucv_get_sock_by_name(char *nm)
396 {
397         struct sock *sk;
398
399         sk_for_each(sk, &iucv_sk_list.head)
400                 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
401                         return sk;
402
403         return NULL;
404 }
405
406 static void iucv_sock_destruct(struct sock *sk)
407 {
408         skb_queue_purge(&sk->sk_receive_queue);
409         skb_queue_purge(&sk->sk_error_queue);
410
411         sk_mem_reclaim(sk);
412
413         if (!sock_flag(sk, SOCK_DEAD)) {
414                 pr_err("Attempt to release alive iucv socket %p\n", sk);
415                 return;
416         }
417
418         WARN_ON(atomic_read(&sk->sk_rmem_alloc));
419         WARN_ON(refcount_read(&sk->sk_wmem_alloc));
420         WARN_ON(sk->sk_wmem_queued);
421         WARN_ON(sk->sk_forward_alloc);
422 }
423
424 /* Cleanup Listen */
425 static void iucv_sock_cleanup_listen(struct sock *parent)
426 {
427         struct sock *sk;
428
429         /* Close non-accepted connections */
430         while ((sk = iucv_accept_dequeue(parent, NULL))) {
431                 iucv_sock_close(sk);
432                 iucv_sock_kill(sk);
433         }
434
435         parent->sk_state = IUCV_CLOSED;
436 }
437
438 /* Kill socket (only if zapped and orphaned) */
439 static void iucv_sock_kill(struct sock *sk)
440 {
441         if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
442                 return;
443
444         iucv_sock_unlink(&iucv_sk_list, sk);
445         sock_set_flag(sk, SOCK_DEAD);
446         sock_put(sk);
447 }
448
449 /* Terminate an IUCV path */
450 static void iucv_sever_path(struct sock *sk, int with_user_data)
451 {
452         unsigned char user_data[16];
453         struct iucv_sock *iucv = iucv_sk(sk);
454         struct iucv_path *path = iucv->path;
455
456         if (iucv->path) {
457                 iucv->path = NULL;
458                 if (with_user_data) {
459                         low_nmcpy(user_data, iucv->src_name);
460                         high_nmcpy(user_data, iucv->dst_name);
461                         ASCEBC(user_data, sizeof(user_data));
462                         pr_iucv->path_sever(path, user_data);
463                 } else
464                         pr_iucv->path_sever(path, NULL);
465                 iucv_path_free(path);
466         }
467 }
468
469 /* Send controlling flags through an IUCV socket for HIPER transport */
470 static int iucv_send_ctrl(struct sock *sk, u8 flags)
471 {
472         struct iucv_sock *iucv = iucv_sk(sk);
473         int err = 0;
474         int blen;
475         struct sk_buff *skb;
476         u8 shutdown = 0;
477
478         blen = sizeof(struct af_iucv_trans_hdr) +
479                LL_RESERVED_SPACE(iucv->hs_dev);
480         if (sk->sk_shutdown & SEND_SHUTDOWN) {
481                 /* controlling flags should be sent anyway */
482                 shutdown = sk->sk_shutdown;
483                 sk->sk_shutdown &= RCV_SHUTDOWN;
484         }
485         skb = sock_alloc_send_skb(sk, blen, 1, &err);
486         if (skb) {
487                 skb_reserve(skb, blen);
488                 err = afiucv_hs_send(NULL, sk, skb, flags);
489         }
490         if (shutdown)
491                 sk->sk_shutdown = shutdown;
492         return err;
493 }
494
495 /* Close an IUCV socket */
496 static void iucv_sock_close(struct sock *sk)
497 {
498         struct iucv_sock *iucv = iucv_sk(sk);
499         unsigned long timeo;
500         int err = 0;
501
502         lock_sock(sk);
503
504         switch (sk->sk_state) {
505         case IUCV_LISTEN:
506                 iucv_sock_cleanup_listen(sk);
507                 break;
508
509         case IUCV_CONNECTED:
510                 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
511                         err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
512                         sk->sk_state = IUCV_DISCONN;
513                         sk->sk_state_change(sk);
514                 }
515         case IUCV_DISCONN:   /* fall through */
516                 sk->sk_state = IUCV_CLOSING;
517                 sk->sk_state_change(sk);
518
519                 if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
520                         if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
521                                 timeo = sk->sk_lingertime;
522                         else
523                                 timeo = IUCV_DISCONN_TIMEOUT;
524                         iucv_sock_wait(sk,
525                                         iucv_sock_in_state(sk, IUCV_CLOSED, 0),
526                                         timeo);
527                 }
528
529         case IUCV_CLOSING:   /* fall through */
530                 sk->sk_state = IUCV_CLOSED;
531                 sk->sk_state_change(sk);
532
533                 sk->sk_err = ECONNRESET;
534                 sk->sk_state_change(sk);
535
536                 skb_queue_purge(&iucv->send_skb_q);
537                 skb_queue_purge(&iucv->backlog_skb_q);
538
539         default:   /* fall through */
540                 iucv_sever_path(sk, 1);
541         }
542
543         if (iucv->hs_dev) {
544                 dev_put(iucv->hs_dev);
545                 iucv->hs_dev = NULL;
546                 sk->sk_bound_dev_if = 0;
547         }
548
549         /* mark socket for deletion by iucv_sock_kill() */
550         sock_set_flag(sk, SOCK_ZAPPED);
551
552         release_sock(sk);
553 }
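/*
 * Note: the switch above relies on fall-through: a CONNECTED socket first
 * sends AF_IUCV_FLAG_FIN (HiperSockets transport) and enters DISCONN, then
 * moves to CLOSING while waiting for the send queue to drain or the timeout
 * to expire, and is finally marked CLOSED, its queues purged and its IUCV
 * path severed.
 */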
554
555 static void iucv_sock_init(struct sock *sk, struct sock *parent)
556 {
557         if (parent) {
558                 sk->sk_type = parent->sk_type;
559                 security_sk_clone(parent, sk);
560         }
561 }
562
563 static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
564 {
565         struct sock *sk;
566         struct iucv_sock *iucv;
567
568         sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
569         if (!sk)
570                 return NULL;
571         iucv = iucv_sk(sk);
572
573         sock_init_data(sock, sk);
574         INIT_LIST_HEAD(&iucv->accept_q);
575         spin_lock_init(&iucv->accept_q_lock);
576         skb_queue_head_init(&iucv->send_skb_q);
577         INIT_LIST_HEAD(&iucv->message_q.list);
578         spin_lock_init(&iucv->message_q.lock);
579         skb_queue_head_init(&iucv->backlog_skb_q);
580         iucv->send_tag = 0;
581         atomic_set(&iucv->pendings, 0);
582         iucv->flags = 0;
583         iucv->msglimit = 0;
584         atomic_set(&iucv->msg_sent, 0);
585         atomic_set(&iucv->msg_recv, 0);
586         iucv->path = NULL;
587         iucv->sk_txnotify = afiucv_hs_callback_txnotify;
588         memset(&iucv->src_user_id, 0, 32);
589         if (pr_iucv)
590                 iucv->transport = AF_IUCV_TRANS_IUCV;
591         else
592                 iucv->transport = AF_IUCV_TRANS_HIPER;
593
594         sk->sk_destruct = iucv_sock_destruct;
595         sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
596
597         sock_reset_flag(sk, SOCK_ZAPPED);
598
599         sk->sk_protocol = proto;
600         sk->sk_state    = IUCV_OPEN;
601
602         iucv_sock_link(&iucv_sk_list, sk);
603         return sk;
604 }
605
606 /* Create an IUCV socket */
607 static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
608                             int kern)
609 {
610         struct sock *sk;
611
612         if (protocol && protocol != PF_IUCV)
613                 return -EPROTONOSUPPORT;
614
615         sock->state = SS_UNCONNECTED;
616
617         switch (sock->type) {
618         case SOCK_STREAM:
619                 sock->ops = &iucv_sock_ops;
620                 break;
621         case SOCK_SEQPACKET:
622                 /* currently, proto ops can handle both sk types */
623                 sock->ops = &iucv_sock_ops;
624                 break;
625         default:
626                 return -ESOCKTNOSUPPORT;
627         }
628
629         sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
630         if (!sk)
631                 return -ENOMEM;
632
633         iucv_sock_init(sk, NULL);
634
635         return 0;
636 }
637
638 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
639 {
640         write_lock_bh(&l->lock);
641         sk_add_node(sk, &l->head);
642         write_unlock_bh(&l->lock);
643 }
644
645 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
646 {
647         write_lock_bh(&l->lock);
648         sk_del_node_init(sk);
649         write_unlock_bh(&l->lock);
650 }
651
652 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
653 {
654         unsigned long flags;
655         struct iucv_sock *par = iucv_sk(parent);
656
657         sock_hold(sk);
658         spin_lock_irqsave(&par->accept_q_lock, flags);
659         list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
660         spin_unlock_irqrestore(&par->accept_q_lock, flags);
661         iucv_sk(sk)->parent = parent;
662         sk_acceptq_added(parent);
663 }
664
665 void iucv_accept_unlink(struct sock *sk)
666 {
667         unsigned long flags;
668         struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
669
670         spin_lock_irqsave(&par->accept_q_lock, flags);
671         list_del_init(&iucv_sk(sk)->accept_q);
672         spin_unlock_irqrestore(&par->accept_q_lock, flags);
673         sk_acceptq_removed(iucv_sk(sk)->parent);
674         iucv_sk(sk)->parent = NULL;
675         sock_put(sk);
676 }
677
678 struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
679 {
680         struct iucv_sock *isk, *n;
681         struct sock *sk;
682
683         list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
684                 sk = (struct sock *) isk;
685                 lock_sock(sk);
686
687                 if (sk->sk_state == IUCV_CLOSED) {
688                         iucv_accept_unlink(sk);
689                         release_sock(sk);
690                         continue;
691                 }
692
693                 if (sk->sk_state == IUCV_CONNECTED ||
694                     sk->sk_state == IUCV_DISCONN ||
695                     !newsock) {
696                         iucv_accept_unlink(sk);
697                         if (newsock)
698                                 sock_graft(sk, newsock);
699
700                         release_sock(sk);
701                         return sk;
702                 }
703
704                 release_sock(sk);
705         }
706         return NULL;
707 }
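/*
 * Note: iucv_accept_dequeue() has two callers: iucv_sock_accept() passes a
 * @newsock and only takes children that are CONNECTED or DISCONN, while
 * iucv_sock_cleanup_listen() passes NULL and drains every queued child.
 */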
708
709 static void __iucv_auto_name(struct iucv_sock *iucv)
710 {
711         char name[12];
712
713         sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
714         while (__iucv_get_sock_by_name(name)) {
715                 sprintf(name, "%08x",
716                         atomic_inc_return(&iucv_sk_list.autobind_name));
717         }
718         memcpy(iucv->src_name, name, 8);
719 }
720
721 /* Bind an unbound socket */
722 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
723                           int addr_len)
724 {
725         struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
726         struct sock *sk = sock->sk;
727         struct iucv_sock *iucv;
728         int err = 0;
729         struct net_device *dev;
730         char uid[9];
731
732         /* Verify the input sockaddr */
733         if (addr_len < sizeof(struct sockaddr_iucv) ||
734             addr->sa_family != AF_IUCV)
735                 return -EINVAL;
736
737         lock_sock(sk);
738         if (sk->sk_state != IUCV_OPEN) {
739                 err = -EBADFD;
740                 goto done;
741         }
742
743         write_lock_bh(&iucv_sk_list.lock);
744
745         iucv = iucv_sk(sk);
746         if (__iucv_get_sock_by_name(sa->siucv_name)) {
747                 err = -EADDRINUSE;
748                 goto done_unlock;
749         }
750         if (iucv->path)
751                 goto done_unlock;
752
753         /* Bind the socket */
754         if (pr_iucv)
755                 if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
756                         goto vm_bind; /* VM IUCV transport */
757
758         /* try hiper transport */
759         memcpy(uid, sa->siucv_user_id, sizeof(uid));
760         ASCEBC(uid, 8);
761         rcu_read_lock();
762         for_each_netdev_rcu(&init_net, dev) {
763                 if (!memcmp(dev->perm_addr, uid, 8)) {
764                         memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
765                         /* Check for uninitialized siucv_name */
766                         if (strncmp(sa->siucv_name, "        ", 8) == 0)
767                                 __iucv_auto_name(iucv);
768                         else
769                                 memcpy(iucv->src_name, sa->siucv_name, 8);
770                         sk->sk_bound_dev_if = dev->ifindex;
771                         iucv->hs_dev = dev;
772                         dev_hold(dev);
773                         sk->sk_state = IUCV_BOUND;
774                         iucv->transport = AF_IUCV_TRANS_HIPER;
775                         if (!iucv->msglimit)
776                                 iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
777                         rcu_read_unlock();
778                         goto done_unlock;
779                 }
780         }
781         rcu_read_unlock();
782 vm_bind:
783         if (pr_iucv) {
784                 /* use local userid for backward compat */
785                 memcpy(iucv->src_name, sa->siucv_name, 8);
786                 memcpy(iucv->src_user_id, iucv_userid, 8);
787                 sk->sk_state = IUCV_BOUND;
788                 iucv->transport = AF_IUCV_TRANS_IUCV;
789                 sk->sk_allocation |= GFP_DMA;
790                 if (!iucv->msglimit)
791                         iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
792                 goto done_unlock;
793         }
794         /* found no dev to bind */
795         err = -ENODEV;
796 done_unlock:
797         /* Release the socket list lock */
798         write_unlock_bh(&iucv_sk_list.lock);
799 done:
800         release_sock(sk);
801         return err;
802 }
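/*
 * Note on transport selection above: a user id equal to the local z/VM user
 * id (iucv_userid) binds the socket to the classic VM IUCV transport.
 * Otherwise a network device whose permanent address matches the
 * (EBCDIC-converted) user id selects the HiperSockets transport; if no such
 * device exists, the code falls back to VM IUCV with the local user id when
 * pr_iucv is available and fails with -ENODEV otherwise.
 */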
803
804 /* Automatically bind an unbound socket */
805 static int iucv_sock_autobind(struct sock *sk)
806 {
807         struct iucv_sock *iucv = iucv_sk(sk);
808         int err = 0;
809
810         if (unlikely(!pr_iucv))
811                 return -EPROTO;
812
813         memcpy(iucv->src_user_id, iucv_userid, 8);
814         iucv->transport = AF_IUCV_TRANS_IUCV;
815         sk->sk_allocation |= GFP_DMA;
816
817         write_lock_bh(&iucv_sk_list.lock);
818         __iucv_auto_name(iucv);
819         write_unlock_bh(&iucv_sk_list.lock);
820
821         if (!iucv->msglimit)
822                 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
823
824         return err;
825 }
826
827 static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
828 {
829         struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
830         struct sock *sk = sock->sk;
831         struct iucv_sock *iucv = iucv_sk(sk);
832         unsigned char user_data[16];
833         int err;
834
835         high_nmcpy(user_data, sa->siucv_name);
836         low_nmcpy(user_data, iucv->src_name);
837         ASCEBC(user_data, sizeof(user_data));
838
839         /* Create path. */
840         iucv->path = iucv_path_alloc(iucv->msglimit,
841                                      IUCV_IPRMDATA, GFP_KERNEL);
842         if (!iucv->path) {
843                 err = -ENOMEM;
844                 goto done;
845         }
846         err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
847                                     sa->siucv_user_id, NULL, user_data,
848                                     sk);
849         if (err) {
850                 iucv_path_free(iucv->path);
851                 iucv->path = NULL;
852                 switch (err) {
853                 case 0x0b:      /* Target communicator is not logged on */
854                         err = -ENETUNREACH;
855                         break;
856                 case 0x0d:      /* Max connections for this guest exceeded */
857                 case 0x0e:      /* Max connections for target guest exceeded */
858                         err = -EAGAIN;
859                         break;
860                 case 0x0f:      /* Missing IUCV authorization */
861                         err = -EACCES;
862                         break;
863                 default:
864                         err = -ECONNREFUSED;
865                         break;
866                 }
867         }
868 done:
869         return err;
870 }
871
872 /* Connect an unconnected socket */
873 static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
874                              int alen, int flags)
875 {
876         struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
877         struct sock *sk = sock->sk;
878         struct iucv_sock *iucv = iucv_sk(sk);
879         int err;
880
881         if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
882                 return -EINVAL;
883
884         if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
885                 return -EBADFD;
886
887         if (sk->sk_state == IUCV_OPEN &&
888             iucv->transport == AF_IUCV_TRANS_HIPER)
889                 return -EBADFD; /* explicit bind required */
890
891         if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
892                 return -EINVAL;
893
894         if (sk->sk_state == IUCV_OPEN) {
895                 err = iucv_sock_autobind(sk);
896                 if (unlikely(err))
897                         return err;
898         }
899
900         lock_sock(sk);
901
902         /* Set the destination information */
903         memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
904         memcpy(iucv->dst_name, sa->siucv_name, 8);
905
906         if (iucv->transport == AF_IUCV_TRANS_HIPER)
907                 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
908         else
909                 err = afiucv_path_connect(sock, addr);
910         if (err)
911                 goto done;
912
913         if (sk->sk_state != IUCV_CONNECTED)
914                 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
915                                                             IUCV_DISCONN),
916                                      sock_sndtimeo(sk, flags & O_NONBLOCK));
917
918         if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
919                 err = -ECONNREFUSED;
920
921         if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
922                 iucv_sever_path(sk, 0);
923
924 done:
925         release_sock(sk);
926         return err;
927 }
928
929 /* Move a socket into listening state. */
930 static int iucv_sock_listen(struct socket *sock, int backlog)
931 {
932         struct sock *sk = sock->sk;
933         int err;
934
935         lock_sock(sk);
936
937         err = -EINVAL;
938         if (sk->sk_state != IUCV_BOUND)
939                 goto done;
940
941         if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
942                 goto done;
943
944         sk->sk_max_ack_backlog = backlog;
945         sk->sk_ack_backlog = 0;
946         sk->sk_state = IUCV_LISTEN;
947         err = 0;
948
949 done:
950         release_sock(sk);
951         return err;
952 }
953
954 /* Accept a pending connection */
955 static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
956                             int flags, bool kern)
957 {
958         DECLARE_WAITQUEUE(wait, current);
959         struct sock *sk = sock->sk, *nsk;
960         long timeo;
961         int err = 0;
962
963         lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
964
965         if (sk->sk_state != IUCV_LISTEN) {
966                 err = -EBADFD;
967                 goto done;
968         }
969
970         timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
971
972         /* Wait for an incoming connection */
973         add_wait_queue_exclusive(sk_sleep(sk), &wait);
974         while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
975                 set_current_state(TASK_INTERRUPTIBLE);
976                 if (!timeo) {
977                         err = -EAGAIN;
978                         break;
979                 }
980
981                 release_sock(sk);
982                 timeo = schedule_timeout(timeo);
983                 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
984
985                 if (sk->sk_state != IUCV_LISTEN) {
986                         err = -EBADFD;
987                         break;
988                 }
989
990                 if (signal_pending(current)) {
991                         err = sock_intr_errno(timeo);
992                         break;
993                 }
994         }
995
996         set_current_state(TASK_RUNNING);
997         remove_wait_queue(sk_sleep(sk), &wait);
998
999         if (err)
1000                 goto done;
1001
1002         newsock->state = SS_CONNECTED;
1003
1004 done:
1005         release_sock(sk);
1006         return err;
1007 }
1008
1009 static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
1010                              int peer)
1011 {
1012         struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
1013         struct sock *sk = sock->sk;
1014         struct iucv_sock *iucv = iucv_sk(sk);
1015
1016         addr->sa_family = AF_IUCV;
1017
1018         if (peer) {
1019                 memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
1020                 memcpy(siucv->siucv_name, iucv->dst_name, 8);
1021         } else {
1022                 memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
1023                 memcpy(siucv->siucv_name, iucv->src_name, 8);
1024         }
1025         memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
1026         memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
1027         memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
1028
1029         return sizeof(struct sockaddr_iucv);
1030 }
1031
1032 /**
1033  * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
1034  * @path:       IUCV path
1035  * @msg:        Pointer to a struct iucv_message
1036  * @skb:        The socket data to send, skb->len MUST BE <= 7
1037  *
1038  * Send the socket data in the parameter list in the iucv message
1039  * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
1040  * list and the socket data len at index 7 (last byte).
1041  * See also iucv_msg_length().
1042  *
1043  * Returns the error code from the iucv_message_send() call.
1044  */
1045 static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
1046                           struct sk_buff *skb)
1047 {
1048         u8 prmdata[8];
1049
1050         memcpy(prmdata, (void *) skb->data, skb->len);
1051         prmdata[7] = 0xff - (u8) skb->len;
1052         return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
1053                                  (void *) prmdata, 8);
1054 }
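/*
 * Example: sending 5 bytes of socket data stores them in prmdata[0..4] and
 * sets prmdata[7] = 0xff - 5 = 0xfa; iucv_msg_length() on the receiver side
 * recovers the length as 0xff - 0xfa = 5.
 */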
1055
1056 static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1057                              size_t len)
1058 {
1059         struct sock *sk = sock->sk;
1060         struct iucv_sock *iucv = iucv_sk(sk);
1061         size_t headroom = 0;
1062         size_t linear;
1063         struct sk_buff *skb;
1064         struct iucv_message txmsg = {0};
1065         struct cmsghdr *cmsg;
1066         int cmsg_done;
1067         long timeo;
1068         char user_id[9];
1069         char appl_id[9];
1070         int err;
1071         int noblock = msg->msg_flags & MSG_DONTWAIT;
1072
1073         err = sock_error(sk);
1074         if (err)
1075                 return err;
1076
1077         if (msg->msg_flags & MSG_OOB)
1078                 return -EOPNOTSUPP;
1079
1080         /* SOCK_SEQPACKET: we do not support segmented records */
1081         if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
1082                 return -EOPNOTSUPP;
1083
1084         lock_sock(sk);
1085
1086         if (sk->sk_shutdown & SEND_SHUTDOWN) {
1087                 err = -EPIPE;
1088                 goto out;
1089         }
1090
1091         /* Return if the socket is not in connected state */
1092         if (sk->sk_state != IUCV_CONNECTED) {
1093                 err = -ENOTCONN;
1094                 goto out;
1095         }
1096
1097         /* initialize defaults */
1098         cmsg_done   = 0;        /* check for duplicate headers */
1099         txmsg.class = 0;
1100
1101         /* iterate over control messages */
1102         for_each_cmsghdr(cmsg, msg) {
1103                 if (!CMSG_OK(msg, cmsg)) {
1104                         err = -EINVAL;
1105                         goto out;
1106                 }
1107
1108                 if (cmsg->cmsg_level != SOL_IUCV)
1109                         continue;
1110
1111                 if (cmsg->cmsg_type & cmsg_done) {
1112                         err = -EINVAL;
1113                         goto out;
1114                 }
1115                 cmsg_done |= cmsg->cmsg_type;
1116
1117                 switch (cmsg->cmsg_type) {
1118                 case SCM_IUCV_TRGCLS:
1119                         if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
1120                                 err = -EINVAL;
1121                                 goto out;
1122                         }
1123
1124                         /* set iucv message target class */
1125                         memcpy(&txmsg.class,
1126                                 (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
1127
1128                         break;
1129
1130                 default:
1131                         err = -EINVAL;
1132                         goto out;
1133                 }
1134         }
1135
1136         /* allocate one skb for each iucv message:
1137          * this is fine for SOCK_SEQPACKET (unless we want to support
1138          * segmented records using the MSG_EOR flag), but
1139          * for SOCK_STREAM we might want to improve it in future */
1140         if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1141                 headroom = sizeof(struct af_iucv_trans_hdr) +
1142                            LL_RESERVED_SPACE(iucv->hs_dev);
1143                 linear = len;
1144         } else {
1145                 if (len < PAGE_SIZE) {
1146                         linear = len;
1147                 } else {
1148                         /* In nonlinear "classic" iucv skb,
1149                          * reserve space for iucv_array
1150                          */
1151                         headroom = sizeof(struct iucv_array) *
1152                                    (MAX_SKB_FRAGS + 1);
1153                         linear = PAGE_SIZE - headroom;
1154                 }
1155         }
1156         skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
1157                                    noblock, &err, 0);
1158         if (!skb)
1159                 goto out;
1160         if (headroom)
1161                 skb_reserve(skb, headroom);
1162         skb_put(skb, linear);
1163         skb->len = len;
1164         skb->data_len = len - linear;
1165         err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1166         if (err)
1167                 goto fail;
1168
1169         /* wait until outstanding messages are below the iucv path's message limit */
1170         timeo = sock_sndtimeo(sk, noblock);
1171         err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
1172         if (err)
1173                 goto fail;
1174
1175         /* return -ECONNRESET if the socket is no longer connected */
1176         if (sk->sk_state != IUCV_CONNECTED) {
1177                 err = -ECONNRESET;
1178                 goto fail;
1179         }
1180
1181         /* increment and save iucv message tag for msg_completion cbk */
1182         txmsg.tag = iucv->send_tag++;
1183         IUCV_SKB_CB(skb)->tag = txmsg.tag;
1184
1185         if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1186                 atomic_inc(&iucv->msg_sent);
1187                 err = afiucv_hs_send(&txmsg, sk, skb, 0);
1188                 if (err) {
1189                         atomic_dec(&iucv->msg_sent);
1190                         goto out;
1191                 }
1192         } else { /* Classic VM IUCV transport */
1193                 skb_queue_tail(&iucv->send_skb_q, skb);
1194
1195                 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
1196                     skb->len <= 7) {
1197                         err = iucv_send_iprm(iucv->path, &txmsg, skb);
1198
1199                         /* on success: there is no message_complete callback */
1200                         /* for an IPRMDATA msg; remove skb from send queue   */
1201                         if (err == 0) {
1202                                 skb_unlink(skb, &iucv->send_skb_q);
1203                                 kfree_skb(skb);
1204                         }
1205
1206                         /* this error should never happen since the     */
1207                         /* IUCV_IPRMDATA path flag is set... sever path */
1208                         if (err == 0x15) {
1209                                 pr_iucv->path_sever(iucv->path, NULL);
1210                                 skb_unlink(skb, &iucv->send_skb_q);
1211                                 err = -EPIPE;
1212                                 goto fail;
1213                         }
1214                 } else if (skb_is_nonlinear(skb)) {
1215                         struct iucv_array *iba = (struct iucv_array *)skb->head;
1216                         int i;
1217
1218                         /* skip iucv_array lying in the headroom */
1219                         iba[0].address = (u32)(addr_t)skb->data;
1220                         iba[0].length = (u32)skb_headlen(skb);
1221                         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1222                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1223
1224                                 iba[i + 1].address =
1225                                         (u32)(addr_t)skb_frag_address(frag);
1226                                 iba[i + 1].length = (u32)skb_frag_size(frag);
1227                         }
1228                         err = pr_iucv->message_send(iucv->path, &txmsg,
1229                                                     IUCV_IPBUFLST, 0,
1230                                                     (void *)iba, skb->len);
1231                 } else { /* non-IPRM Linear skb */
1232                         err = pr_iucv->message_send(iucv->path, &txmsg,
1233                                         0, 0, (void *)skb->data, skb->len);
1234                 }
1235                 if (err) {
1236                         if (err == 3) {
1237                                 user_id[8] = 0;
1238                                 memcpy(user_id, iucv->dst_user_id, 8);
1239                                 appl_id[8] = 0;
1240                                 memcpy(appl_id, iucv->dst_name, 8);
1241                                 pr_err(
1242                 "Application %s on z/VM guest %s exceeds message limit\n",
1243                                         appl_id, user_id);
1244                                 err = -EAGAIN;
1245                         } else {
1246                                 err = -EPIPE;
1247                         }
1248                         skb_unlink(skb, &iucv->send_skb_q);
1249                         goto fail;
1250                 }
1251         }
1252
1253         release_sock(sk);
1254         return len;
1255
1256 fail:
1257         kfree_skb(skb);
1258 out:
1259         release_sock(sk);
1260         return err;
1261 }
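/*
 * Note: for the classic VM IUCV transport, sendmsg() picks one of three send
 * paths: IPRMDATA for messages of at most 7 bytes when both the path and the
 * socket have the IUCV_IPRMDATA flag set, IUCV_IPBUFLST with an iucv_array
 * built in the skb headroom for nonlinear skbs, and a plain buffer send
 * otherwise.
 */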
1262
1263 static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
1264 {
1265         size_t headroom, linear;
1266         struct sk_buff *skb;
1267         int err;
1268
1269         if (len < PAGE_SIZE) {
1270                 headroom = 0;
1271                 linear = len;
1272         } else {
1273                 headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
1274                 linear = PAGE_SIZE - headroom;
1275         }
1276         skb = alloc_skb_with_frags(headroom + linear, len - linear,
1277                                    0, &err, GFP_ATOMIC | GFP_DMA);
1278         WARN_ONCE(!skb,
1279                   "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
1280                   len, err);
1281         if (skb) {
1282                 if (headroom)
1283                         skb_reserve(skb, headroom);
1284                 skb_put(skb, linear);
1285                 skb->len = len;
1286                 skb->data_len = len - linear;
1287         }
1288         return skb;
1289 }
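/*
 * Note: for messages of PAGE_SIZE or more, the reserved headroom is later
 * reused by iucv_process_message() as a struct iucv_array scatter list
 * describing the linear part and each page fragment passed to
 * message_receive().
 */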
1290
1291 /* iucv_process_message() - Receive a single outstanding IUCV message
1292  *
1293  * Locking: must be called with message_q.lock held
1294  */
1295 static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1296                                  struct iucv_path *path,
1297                                  struct iucv_message *msg)
1298 {
1299         int rc;
1300         unsigned int len;
1301
1302         len = iucv_msg_length(msg);
1303
1304         /* store msg target class in the second 4 bytes of skb ctrl buffer */
1305         /* Note: the first 4 bytes are reserved for msg tag */
1306         IUCV_SKB_CB(skb)->class = msg->class;
1307
1308         /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
1309         if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
1310                 if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
1311                         skb->data = NULL;
1312                         skb->len = 0;
1313                 }
1314         } else {
1315                 if (skb_is_nonlinear(skb)) {
1316                         struct iucv_array *iba = (struct iucv_array *)skb->head;
1317                         int i;
1318
1319                         iba[0].address = (u32)(addr_t)skb->data;
1320                         iba[0].length = (u32)skb_headlen(skb);
1321                         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1322                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1323
1324                                 iba[i + 1].address =
1325                                         (u32)(addr_t)skb_frag_address(frag);
1326                                 iba[i + 1].length = (u32)skb_frag_size(frag);
1327                         }
1328                         rc = pr_iucv->message_receive(path, msg,
1329                                               IUCV_IPBUFLST,
1330                                               (void *)iba, len, NULL);
1331                 } else {
1332                         rc = pr_iucv->message_receive(path, msg,
1333                                               msg->flags & IUCV_IPRMDATA,
1334                                               skb->data, len, NULL);
1335                 }
1336                 if (rc) {
1337                         kfree_skb(skb);
1338                         return;
1339                 }
1340                 WARN_ON_ONCE(skb->len != len);
1341         }
1342
1343         IUCV_SKB_CB(skb)->offset = 0;
1344         if (sk_filter(sk, skb)) {
1345                 atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
1346                 kfree_skb(skb);
1347                 return;
1348         }
1349         if (__sock_queue_rcv_skb(sk, skb))      /* handle rcv queue full */
1350                 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
1351 }
1352
1353 /* iucv_process_message_q() - Process outstanding IUCV messages
1354  *
1355  * Locking: must be called with message_q.lock held
1356  */
1357 static void iucv_process_message_q(struct sock *sk)
1358 {
1359         struct iucv_sock *iucv = iucv_sk(sk);
1360         struct sk_buff *skb;
1361         struct sock_msg_q *p, *n;
1362
1363         list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
1364                 skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
1365                 if (!skb)
1366                         break;
1367                 iucv_process_message(sk, skb, p->path, &p->msg);
1368                 list_del(&p->list);
1369                 kfree(p);
1370                 if (!skb_queue_empty(&iucv->backlog_skb_q))
1371                         break;
1372         }
1373 }
1374
1375 static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1376                              size_t len, int flags)
1377 {
1378         int noblock = flags & MSG_DONTWAIT;
1379         struct sock *sk = sock->sk;
1380         struct iucv_sock *iucv = iucv_sk(sk);
1381         unsigned int copied, rlen;
1382         struct sk_buff *skb, *rskb, *cskb;
1383         int err = 0;
1384         u32 offset;
1385
1386         if ((sk->sk_state == IUCV_DISCONN) &&
1387             skb_queue_empty(&iucv->backlog_skb_q) &&
1388             skb_queue_empty(&sk->sk_receive_queue) &&
1389             list_empty(&iucv->message_q.list))
1390                 return 0;
1391
1392         if (flags & (MSG_OOB))
1393                 return -EOPNOTSUPP;
1394
1395         /* receive/dequeue next skb:
1396          * the function understands MSG_PEEK and, thus, does not dequeue the skb */
1397         skb = skb_recv_datagram(sk, flags, noblock, &err);
1398         if (!skb) {
1399                 if (sk->sk_shutdown & RCV_SHUTDOWN)
1400                         return 0;
1401                 return err;
1402         }
1403
1404         offset = IUCV_SKB_CB(skb)->offset;
1405         rlen   = skb->len - offset;             /* real length of skb */
1406         copied = min_t(unsigned int, rlen, len);
1407         if (!rlen)
1408                 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1409
1410         cskb = skb;
1411         if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
1412                 if (!(flags & MSG_PEEK))
1413                         skb_queue_head(&sk->sk_receive_queue, skb);
1414                 return -EFAULT;
1415         }
1416
1417         /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
1418         if (sk->sk_type == SOCK_SEQPACKET) {
1419                 if (copied < rlen)
1420                         msg->msg_flags |= MSG_TRUNC;
1421                 /* each iucv message contains a complete record */
1422                 msg->msg_flags |= MSG_EOR;
1423         }
1424
1425         /* create control message to store iucv msg target class:
1426          * take the trgcls from the skb's control buffer, since the
1427          * original iucv message may have been fragmented. */
1428         err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
1429                        sizeof(IUCV_SKB_CB(skb)->class),
1430                        (void *)&IUCV_SKB_CB(skb)->class);
1431         if (err) {
1432                 if (!(flags & MSG_PEEK))
1433                         skb_queue_head(&sk->sk_receive_queue, skb);
1434                 return err;
1435         }
1436
1437         /* Mark read part of skb as used */
1438         if (!(flags & MSG_PEEK)) {
1439
1440                 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
1441                 if (sk->sk_type == SOCK_STREAM) {
1442                         if (copied < rlen) {
1443                                 IUCV_SKB_CB(skb)->offset = offset + copied;
1444                                 skb_queue_head(&sk->sk_receive_queue, skb);
1445                                 goto done;
1446                         }
1447                 }
1448
1449                 kfree_skb(skb);
1450                 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1451                         atomic_inc(&iucv->msg_recv);
1452                         if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
1453                                 WARN_ON(1);
1454                                 iucv_sock_close(sk);
1455                                 return -EFAULT;
1456                         }
1457                 }
1458
1459                 /* Queue backlog skbs */
1460                 spin_lock_bh(&iucv->message_q.lock);
1461                 rskb = skb_dequeue(&iucv->backlog_skb_q);
1462                 while (rskb) {
1463                         IUCV_SKB_CB(rskb)->offset = 0;
1464                         if (__sock_queue_rcv_skb(sk, rskb)) {
1465                                 /* handle rcv queue full */
1466                                 skb_queue_head(&iucv->backlog_skb_q,
1467                                                 rskb);
1468                                 break;
1469                         }
1470                         rskb = skb_dequeue(&iucv->backlog_skb_q);
1471                 }
1472                 if (skb_queue_empty(&iucv->backlog_skb_q)) {
1473                         if (!list_empty(&iucv->message_q.list))
1474                                 iucv_process_message_q(sk);
1475                         if (atomic_read(&iucv->msg_recv) >=
1476                                                         iucv->msglimit / 2) {
1477                                 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
1478                                 if (err) {
1479                                         sk->sk_state = IUCV_DISCONN;
1480                                         sk->sk_state_change(sk);
1481                                 }
1482                         }
1483                 }
1484                 spin_unlock_bh(&iucv->message_q.lock);
1485         }
1486
1487 done:
1488         /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
1489         if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
1490                 copied = rlen;
1491
1492         return copied;
1493 }
1494
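     /* check a listening socket's accept queue; report EPOLLIN | EPOLLRDNORM
      * as soon as at least one child socket has reached IUCV_CONNECTED
      */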
1495 static inline __poll_t iucv_accept_poll(struct sock *parent)
1496 {
1497         struct iucv_sock *isk, *n;
1498         struct sock *sk;
1499
1500         list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
1501                 sk = (struct sock *) isk;
1502
1503                 if (sk->sk_state == IUCV_CONNECTED)
1504                         return EPOLLIN | EPOLLRDNORM;
1505         }
1506
1507         return 0;
1508 }
1509
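     /* report the poll mask for an AF_IUCV socket, derived from socket state,
      * shutdown flags, queued receive data and available send space
      */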
1510 __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
1511                             poll_table *wait)
1512 {
1513         struct sock *sk = sock->sk;
1514         __poll_t mask = 0;
1515
1516         sock_poll_wait(file, sock, wait);
1517
1518         if (sk->sk_state == IUCV_LISTEN)
1519                 return iucv_accept_poll(sk);
1520
1521         if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1522                 mask |= EPOLLERR |
1523                         (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
1524
1525         if (sk->sk_shutdown & RCV_SHUTDOWN)
1526                 mask |= EPOLLRDHUP;
1527
1528         if (sk->sk_shutdown == SHUTDOWN_MASK)
1529                 mask |= EPOLLHUP;
1530
1531         if (!skb_queue_empty(&sk->sk_receive_queue) ||
1532             (sk->sk_shutdown & RCV_SHUTDOWN))
1533                 mask |= EPOLLIN | EPOLLRDNORM;
1534
1535         if (sk->sk_state == IUCV_CLOSED)
1536                 mask |= EPOLLHUP;
1537
1538         if (sk->sk_state == IUCV_DISCONN)
1539                 mask |= EPOLLIN;
1540
1541         if (sock_writeable(sk) && iucv_below_msglim(sk))
1542                 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
1543         else
1544                 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1545
1546         return mask;
1547 }
1548
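     /* shut down one or both directions of a connection: SEND_SHUTDOWN tells
      * the peer (IPRM shutdown message or SHT control skb), RCV_SHUTDOWN
      * quiesces the path and purges the receive queue
      */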
1549 static int iucv_sock_shutdown(struct socket *sock, int how)
1550 {
1551         struct sock *sk = sock->sk;
1552         struct iucv_sock *iucv = iucv_sk(sk);
1553         struct iucv_message txmsg;
1554         int err = 0;
1555
1556         how++;
1557
1558         if ((how & ~SHUTDOWN_MASK) || !how)
1559                 return -EINVAL;
1560
1561         lock_sock(sk);
1562         switch (sk->sk_state) {
1563         case IUCV_LISTEN:
1564         case IUCV_DISCONN:
1565         case IUCV_CLOSING:
1566         case IUCV_CLOSED:
1567                 err = -ENOTCONN;
1568                 goto fail;
1569         default:
1570                 break;
1571         }
1572
1573         if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
1574                 if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1575                         txmsg.class = 0;
1576                         txmsg.tag = 0;
1577                         err = pr_iucv->message_send(iucv->path, &txmsg,
1578                                 IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
1579                         if (err) {
1580                                 switch (err) {
1581                                 case 1:
1582                                         err = -ENOTCONN;
1583                                         break;
1584                                 case 2:
1585                                         err = -ECONNRESET;
1586                                         break;
1587                                 default:
1588                                         err = -ENOTCONN;
1589                                         break;
1590                                 }
1591                         }
1592                 } else
1593                         iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1594         }
1595
1596         sk->sk_shutdown |= how;
1597         if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
1598                 if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
1599                     iucv->path) {
1600                         err = pr_iucv->path_quiesce(iucv->path, NULL);
1601                         if (err)
1602                                 err = -ENOTCONN;
1603 /*                      skb_queue_purge(&sk->sk_receive_queue); */
1604                 }
1605                 skb_queue_purge(&sk->sk_receive_queue);
1606         }
1607
1608         /* Wake up anyone sleeping in poll */
1609         sk->sk_state_change(sk);
1610
1611 fail:
1612         release_sock(sk);
1613         return err;
1614 }
1615
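     /* close the socket, detach it from the socket layer and drop the
      * final reference
      */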
1616 static int iucv_sock_release(struct socket *sock)
1617 {
1618         struct sock *sk = sock->sk;
1619         int err = 0;
1620
1621         if (!sk)
1622                 return 0;
1623
1624         iucv_sock_close(sk);
1625
1626         sock_orphan(sk);
1627         iucv_sock_kill(sk);
1628         return err;
1629 }
1630
1631 /* getsockopt and setsockopt */
1632 static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1633                                 char __user *optval, unsigned int optlen)
1634 {
1635         struct sock *sk = sock->sk;
1636         struct iucv_sock *iucv = iucv_sk(sk);
1637         int val;
1638         int rc;
1639
1640         if (level != SOL_IUCV)
1641                 return -ENOPROTOOPT;
1642
1643         if (optlen < sizeof(int))
1644                 return -EINVAL;
1645
1646         if (get_user(val, (int __user *) optval))
1647                 return -EFAULT;
1648
1649         rc = 0;
1650
1651         lock_sock(sk);
1652         switch (optname) {
1653         case SO_IPRMDATA_MSG:
1654                 if (val)
1655                         iucv->flags |= IUCV_IPRMDATA;
1656                 else
1657                         iucv->flags &= ~IUCV_IPRMDATA;
1658                 break;
1659         case SO_MSGLIMIT:
1660                 switch (sk->sk_state) {
1661                 case IUCV_OPEN:
1662                 case IUCV_BOUND:
1663                         if (val < 1 || val > (u16)(~0))
1664                                 rc = -EINVAL;
1665                         else
1666                                 iucv->msglimit = val;
1667                         break;
1668                 default:
1669                         rc = -EINVAL;
1670                         break;
1671                 }
1672                 break;
1673         default:
1674                 rc = -ENOPROTOOPT;
1675                 break;
1676         }
1677         release_sock(sk);
1678
1679         return rc;
1680 }
1681
1682 static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1683                                 char __user *optval, int __user *optlen)
1684 {
1685         struct sock *sk = sock->sk;
1686         struct iucv_sock *iucv = iucv_sk(sk);
1687         unsigned int val;
1688         int len;
1689
1690         if (level != SOL_IUCV)
1691                 return -ENOPROTOOPT;
1692
1693         if (get_user(len, optlen))
1694                 return -EFAULT;
1695
1696         if (len < 0)
1697                 return -EINVAL;
1698
1699         len = min_t(unsigned int, len, sizeof(int));
1700
1701         switch (optname) {
1702         case SO_IPRMDATA_MSG:
1703                 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1704                 break;
1705         case SO_MSGLIMIT:
1706                 lock_sock(sk);
1707                 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1708                                            : iucv->msglimit;    /* default */
1709                 release_sock(sk);
1710                 break;
1711         case SO_MSGSIZE:
1712                 if (sk->sk_state == IUCV_OPEN)
1713                         return -EBADFD;
1714                 val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1715                                 sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1716                                 0x7fffffff;
1717                 break;
1718         default:
1719                 return -ENOPROTOOPT;
1720         }
1721
1722         if (put_user(len, optlen))
1723                 return -EFAULT;
1724         if (copy_to_user(optval, &val, len))
1725                 return -EFAULT;
1726
1727         return 0;
1728 }
1729
1730
1731 /* Callback wrappers - called from iucv base support */
1732 static int iucv_callback_connreq(struct iucv_path *path,
1733                                  u8 ipvmid[8], u8 ipuser[16])
1734 {
1735         unsigned char user_data[16];
1736         unsigned char nuser_data[16];
1737         unsigned char src_name[8];
1738         struct sock *sk, *nsk;
1739         struct iucv_sock *iucv, *niucv;
1740         int err;
1741
1742         memcpy(src_name, ipuser, 8);
1743         EBCASC(src_name, 8);
1744         /* Find out if this path belongs to af_iucv. */
1745         read_lock(&iucv_sk_list.lock);
1746         iucv = NULL;
1747         sk = NULL;
1748         sk_for_each(sk, &iucv_sk_list.head)
1749                 if (sk->sk_state == IUCV_LISTEN &&
1750                     !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1751                         /*
1752                          * Found a listening socket with
1753                          * src_name == ipuser[0-7].
1754                          */
1755                         iucv = iucv_sk(sk);
1756                         break;
1757                 }
1758         read_unlock(&iucv_sk_list.lock);
1759         if (!iucv)
1760                 /* No socket found, not one of our paths. */
1761                 return -EINVAL;
1762
1763         bh_lock_sock(sk);
1764
1765         /* Check if parent socket is listening */
1766         low_nmcpy(user_data, iucv->src_name);
1767         high_nmcpy(user_data, iucv->dst_name);
1768         ASCEBC(user_data, sizeof(user_data));
1769         if (sk->sk_state != IUCV_LISTEN) {
1770                 err = pr_iucv->path_sever(path, user_data);
1771                 iucv_path_free(path);
1772                 goto fail;
1773         }
1774
1775         /* Check for backlog size */
1776         if (sk_acceptq_is_full(sk)) {
1777                 err = pr_iucv->path_sever(path, user_data);
1778                 iucv_path_free(path);
1779                 goto fail;
1780         }
1781
1782         /* Create the new socket */
1783         nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
1784         if (!nsk) {
1785                 err = pr_iucv->path_sever(path, user_data);
1786                 iucv_path_free(path);
1787                 goto fail;
1788         }
1789
1790         niucv = iucv_sk(nsk);
1791         iucv_sock_init(nsk, sk);
1792         niucv->transport = AF_IUCV_TRANS_IUCV;
1793         nsk->sk_allocation |= GFP_DMA;
1794
1795         /* Set the new iucv_sock */
1796         memcpy(niucv->dst_name, ipuser + 8, 8);
1797         EBCASC(niucv->dst_name, 8);
1798         memcpy(niucv->dst_user_id, ipvmid, 8);
1799         memcpy(niucv->src_name, iucv->src_name, 8);
1800         memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1801         niucv->path = path;
1802
1803         /* Call iucv_accept */
1804         high_nmcpy(nuser_data, ipuser + 8);
1805         memcpy(nuser_data + 8, niucv->src_name, 8);
1806         ASCEBC(nuser_data + 8, 8);
1807
1808         /* set message limit for path based on msglimit of accepting socket */
1809         niucv->msglimit = iucv->msglimit;
1810         path->msglim = iucv->msglimit;
1811         err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
1812         if (err) {
1813                 iucv_sever_path(nsk, 1);
1814                 iucv_sock_kill(nsk);
1815                 goto fail;
1816         }
1817
1818         iucv_accept_enqueue(sk, nsk);
1819
1820         /* Wake up accept */
1821         nsk->sk_state = IUCV_CONNECTED;
1822         sk->sk_data_ready(sk);
1823         err = 0;
1824 fail:
1825         bh_unlock_sock(sk);
1826         return 0;
1827 }
1828
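     /* connection accepted by the peer - switch the socket to IUCV_CONNECTED */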
1829 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1830 {
1831         struct sock *sk = path->private;
1832
1833         sk->sk_state = IUCV_CONNECTED;
1834         sk->sk_state_change(sk);
1835 }
1836
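     /* a message arrived on the IUCV path: deliver it right away if the
      * receive queue has room, otherwise save it on the message queue
      */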
1837 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1838 {
1839         struct sock *sk = path->private;
1840         struct iucv_sock *iucv = iucv_sk(sk);
1841         struct sk_buff *skb;
1842         struct sock_msg_q *save_msg;
1843         int len;
1844
1845         if (sk->sk_shutdown & RCV_SHUTDOWN) {
1846                 pr_iucv->message_reject(path, msg);
1847                 return;
1848         }
1849
1850         spin_lock(&iucv->message_q.lock);
1851
1852         if (!list_empty(&iucv->message_q.list) ||
1853             !skb_queue_empty(&iucv->backlog_skb_q))
1854                 goto save_message;
1855
1856         len = atomic_read(&sk->sk_rmem_alloc);
1857         len += SKB_TRUESIZE(iucv_msg_length(msg));
1858         if (len > sk->sk_rcvbuf)
1859                 goto save_message;
1860
1861         skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
1862         if (!skb)
1863                 goto save_message;
1864
1865         iucv_process_message(sk, skb, path, msg);
1866         goto out_unlock;
1867
1868 save_message:
1869         save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1870         if (!save_msg)
1871                 goto out_unlock;
1872         save_msg->path = path;
1873         save_msg->msg = *msg;
1874
1875         list_add_tail(&save_msg->list, &iucv->message_q.list);
1876
1877 out_unlock:
1878         spin_unlock(&iucv->message_q.lock);
1879 }
1880
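     /* transmit completion: drop the acknowledged skb from the send queue
      * and wake up senders waiting on the message limit
      */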
1881 static void iucv_callback_txdone(struct iucv_path *path,
1882                                  struct iucv_message *msg)
1883 {
1884         struct sock *sk = path->private;
1885         struct sk_buff *this = NULL;
1886         struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1887         struct sk_buff *list_skb;
1888         unsigned long flags;
1889
1890         bh_lock_sock(sk);
1891
1892         spin_lock_irqsave(&list->lock, flags);
1893         skb_queue_walk(list, list_skb) {
1894                 if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
1895                         this = list_skb;
1896                         break;
1897                 }
1898         }
1899         if (this)
1900                 __skb_unlink(this, list);
1901         spin_unlock_irqrestore(&list->lock, flags);
1902
1903         if (this) {
1904                 kfree_skb(this);
1905                 /* wake up any process waiting for sending */
1906                 iucv_sock_wake_msglim(sk);
1907         }
1908
1909         if (sk->sk_state == IUCV_CLOSING) {
1910                 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1911                         sk->sk_state = IUCV_CLOSED;
1912                         sk->sk_state_change(sk);
1913                 }
1914         }
1915         bh_unlock_sock(sk);
1916
1917 }
1918
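     /* the peer severed the path - sever our side and mark the socket
      * as disconnected
      */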
1919 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1920 {
1921         struct sock *sk = path->private;
1922
1923         if (sk->sk_state == IUCV_CLOSED)
1924                 return;
1925
1926         bh_lock_sock(sk);
1927         iucv_sever_path(sk, 1);
1928         sk->sk_state = IUCV_DISCONN;
1929
1930         sk->sk_state_change(sk);
1931         bh_unlock_sock(sk);
1932 }
1933
1934 /* called if the other communication side shuts down its RECV direction;
1935  * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1936  */
1937 static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1938 {
1939         struct sock *sk = path->private;
1940
1941         bh_lock_sock(sk);
1942         if (sk->sk_state != IUCV_CLOSED) {
1943                 sk->sk_shutdown |= SEND_SHUTDOWN;
1944                 sk->sk_state_change(sk);
1945         }
1946         bh_unlock_sock(sk);
1947 }
1948
1949 /***************** HiperSockets transport callbacks ********************/
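     /* turn a received transport header into a reply header by swapping the
      * source and destination identifiers and restoring an empty MAC header
      */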
1950 static void afiucv_swap_src_dest(struct sk_buff *skb)
1951 {
1952         struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
1953         char tmpID[8];
1954         char tmpName[8];
1955
1956         ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1957         ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1958         ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1959         ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1960         memcpy(tmpID, trans_hdr->srcUserID, 8);
1961         memcpy(tmpName, trans_hdr->srcAppName, 8);
1962         memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1963         memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1964         memcpy(trans_hdr->destUserID, tmpID, 8);
1965         memcpy(trans_hdr->destAppName, tmpName, 8);
1966         skb_push(skb, ETH_HLEN);
1967         memset(skb->data, 0, ETH_HLEN);
1968 }
1969
1970 /**
1971  * afiucv_hs_callback_syn() - react on received SYN
1972  **/
1973 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1974 {
1975         struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
1976         struct sock *nsk;
1977         struct iucv_sock *iucv, *niucv;
1978         int err;
1979
1980         iucv = iucv_sk(sk);
1981         if (!iucv) {
1982                 /* no sock - connection refused */
1983                 afiucv_swap_src_dest(skb);
1984                 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1985                 err = dev_queue_xmit(skb);
1986                 goto out;
1987         }
1988
1989         nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
1990         bh_lock_sock(sk);
1991         if ((sk->sk_state != IUCV_LISTEN) ||
1992             sk_acceptq_is_full(sk) ||
1993             !nsk) {
1994                 /* error on server socket - connection refused */
1995                 afiucv_swap_src_dest(skb);
1996                 trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1997                 err = dev_queue_xmit(skb);
1998                 iucv_sock_kill(nsk);
1999                 bh_unlock_sock(sk);
2000                 goto out;
2001         }
2002
2003         niucv = iucv_sk(nsk);
2004         iucv_sock_init(nsk, sk);
2005         niucv->transport = AF_IUCV_TRANS_HIPER;
2006         niucv->msglimit = iucv->msglimit;
2007         if (!trans_hdr->window)
2008                 niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
2009         else
2010                 niucv->msglimit_peer = trans_hdr->window;
2011         memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
2012         memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
2013         memcpy(niucv->src_name, iucv->src_name, 8);
2014         memcpy(niucv->src_user_id, iucv->src_user_id, 8);
2015         nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
2016         niucv->hs_dev = iucv->hs_dev;
2017         dev_hold(niucv->hs_dev);
2018         afiucv_swap_src_dest(skb);
2019         trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
2020         trans_hdr->window = niucv->msglimit;
2021         /* if the receiver acks the xmit, the connection is established */
2022         err = dev_queue_xmit(skb);
2023         if (!err) {
2024                 iucv_accept_enqueue(sk, nsk);
2025                 nsk->sk_state = IUCV_CONNECTED;
2026                 sk->sk_data_ready(sk);
2027         } else
2028                 iucv_sock_kill(nsk);
2029         bh_unlock_sock(sk);
2030
2031 out:
2032         return NET_RX_SUCCESS;
2033 }
2034
2035 /**
2036  * afiucv_hs_callback_synack() - react on received SYN-ACK
2037  **/
2038 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
2039 {
2040         struct iucv_sock *iucv = iucv_sk(sk);
2041
2042         if (!iucv)
2043                 goto out;
2044         if (sk->sk_state != IUCV_BOUND)
2045                 goto out;
2046         bh_lock_sock(sk);
2047         iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
2048         sk->sk_state = IUCV_CONNECTED;
2049         sk->sk_state_change(sk);
2050         bh_unlock_sock(sk);
2051 out:
2052         kfree_skb(skb);
2053         return NET_RX_SUCCESS;
2054 }
2055
2056 /**
2057  * afiucv_hs_callback_synfin() - react on received SYN_FIN
2058  **/
2059 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2060 {
2061         struct iucv_sock *iucv = iucv_sk(sk);
2062
2063         if (!iucv)
2064                 goto out;
2065         if (sk->sk_state != IUCV_BOUND)
2066                 goto out;
2067         bh_lock_sock(sk);
2068         sk->sk_state = IUCV_DISCONN;
2069         sk->sk_state_change(sk);
2070         bh_unlock_sock(sk);
2071 out:
2072         kfree_skb(skb);
2073         return NET_RX_SUCCESS;
2074 }
2075
2076 /**
2077  * afiucv_hs_callback_fin() - react on received FIN
2078  **/
2079 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2080 {
2081         struct iucv_sock *iucv = iucv_sk(sk);
2082
2083         /* other end of connection closed */
2084         if (!iucv)
2085                 goto out;
2086         bh_lock_sock(sk);
2087         if (sk->sk_state == IUCV_CONNECTED) {
2088                 sk->sk_state = IUCV_DISCONN;
2089                 sk->sk_state_change(sk);
2090         }
2091         bh_unlock_sock(sk);
2092 out:
2093         kfree_skb(skb);
2094         return NET_RX_SUCCESS;
2095 }
2096
2097 /**
2098  * afiucv_hs_callback_win() - react on received WIN
2099  **/
2100 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2101 {
2102         struct iucv_sock *iucv = iucv_sk(sk);
2103
2104         if (!iucv)
2105                 return NET_RX_SUCCESS;
2106
2107         if (sk->sk_state != IUCV_CONNECTED)
2108                 return NET_RX_SUCCESS;
2109
2110         atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
2111         iucv_sock_wake_msglim(sk);
2112         return NET_RX_SUCCESS;
2113 }
2114
2115 /**
2116  * afiucv_hs_callback_rx() - react on received data
2117  **/
2118 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2119 {
2120         struct iucv_sock *iucv = iucv_sk(sk);
2121
2122         if (!iucv) {
2123                 kfree_skb(skb);
2124                 return NET_RX_SUCCESS;
2125         }
2126
2127         if (sk->sk_state != IUCV_CONNECTED) {
2128                 kfree_skb(skb);
2129                 return NET_RX_SUCCESS;
2130         }
2131
2132         if (sk->sk_shutdown & RCV_SHUTDOWN) {
2133                 kfree_skb(skb);
2134                 return NET_RX_SUCCESS;
2135         }
2136
2137         /* strip the transport header and reset the offset in the skb cb */
2138         skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2139         skb_reset_transport_header(skb);
2140         skb_reset_network_header(skb);
2141         IUCV_SKB_CB(skb)->offset = 0;
2142         if (sk_filter(sk, skb)) {
2143                 atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
2144                 kfree_skb(skb);
2145                 return NET_RX_SUCCESS;
2146         }
2147
2148         spin_lock(&iucv->message_q.lock);
2149         if (skb_queue_empty(&iucv->backlog_skb_q)) {
2150                 if (__sock_queue_rcv_skb(sk, skb))
2151                         /* handle rcv queue full */
2152                         skb_queue_tail(&iucv->backlog_skb_q, skb);
2153         } else
2154                 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2155         spin_unlock(&iucv->message_q.lock);
2156         return NET_RX_SUCCESS;
2157 }
2158
2159 /**
2160  * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2161  *                   transport
2162  *                   called from netif RX softirq
2163  **/
2164 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2165         struct packet_type *pt, struct net_device *orig_dev)
2166 {
2167         struct sock *sk;
2168         struct iucv_sock *iucv;
2169         struct af_iucv_trans_hdr *trans_hdr;
2170         int err = NET_RX_SUCCESS;
2171         char nullstring[8];
2172
2173         if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
2174                 WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
2175                 kfree_skb(skb);
2176                 return NET_RX_SUCCESS;
2177         }
2178
2179         trans_hdr = iucv_trans_hdr(skb);
2180         EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2181         EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2182         EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2183         EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2184         memset(nullstring, 0, sizeof(nullstring));
2185         iucv = NULL;
2186         sk = NULL;
2187         read_lock(&iucv_sk_list.lock);
2188         sk_for_each(sk, &iucv_sk_list.head) {
2189                 if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2190                         if ((!memcmp(&iucv_sk(sk)->src_name,
2191                                      trans_hdr->destAppName, 8)) &&
2192                             (!memcmp(&iucv_sk(sk)->src_user_id,
2193                                      trans_hdr->destUserID, 8)) &&
2194                             (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2195                             (!memcmp(&iucv_sk(sk)->dst_user_id,
2196                                      nullstring, 8))) {
2197                                 iucv = iucv_sk(sk);
2198                                 break;
2199                         }
2200                 } else {
2201                         if ((!memcmp(&iucv_sk(sk)->src_name,
2202                                      trans_hdr->destAppName, 8)) &&
2203                             (!memcmp(&iucv_sk(sk)->src_user_id,
2204                                      trans_hdr->destUserID, 8)) &&
2205                             (!memcmp(&iucv_sk(sk)->dst_name,
2206                                      trans_hdr->srcAppName, 8)) &&
2207                             (!memcmp(&iucv_sk(sk)->dst_user_id,
2208                                      trans_hdr->srcUserID, 8))) {
2209                                 iucv = iucv_sk(sk);
2210                                 break;
2211                         }
2212                 }
2213         }
2214         read_unlock(&iucv_sk_list.lock);
2215         if (!iucv)
2216                 sk = NULL;
2217
2218         /* no matching socket found:
2219          * how should we reply when there is no socket?
2220          * 1) send without a socket, without checking the send rc?
2221          * 2) introduce a default socket to handle these cases?
2222          *
2223          * SYN -> send SYN|ACK in the good case, send SYN|FIN in the bad case
2224          * data -> send FIN
2225          * SYN|ACK, SYN|FIN, FIN -> no action? */
2226
2227         switch (trans_hdr->flags) {
2228         case AF_IUCV_FLAG_SYN:
2229                 /* connect request */
2230                 err = afiucv_hs_callback_syn(sk, skb);
2231                 break;
2232         case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2233                 /* connect request confirmed */
2234                 err = afiucv_hs_callback_synack(sk, skb);
2235                 break;
2236         case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2237                 /* connect request refused */
2238                 err = afiucv_hs_callback_synfin(sk, skb);
2239                 break;
2240         case (AF_IUCV_FLAG_FIN):
2241                 /* close request */
2242                 err = afiucv_hs_callback_fin(sk, skb);
2243                 break;
2244         case (AF_IUCV_FLAG_WIN):
2245                 err = afiucv_hs_callback_win(sk, skb);
2246                 if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2247                         kfree_skb(skb);
2248                         break;
2249                 }
2250                 /* fall through and receive non-zero length data */
2251         case (AF_IUCV_FLAG_SHT):
2252                 /* shutdown request */
2253                 /* fall through and receive zero length data */
2254         case 0:
2255                 /* plain data frame */
2256                 IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2257                 err = afiucv_hs_callback_rx(sk, skb);
2258                 break;
2259         default:
2260                 kfree_skb(skb);
2261         }
2262
2263         return err;
2264 }
2265
2266 /**
2267  * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2268  *                                 transport
2269  **/
2270 static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2271                                         enum iucv_tx_notify n)
2272 {
2273         struct sock *isk = skb->sk;
2274         struct sock *sk = NULL;
2275         struct iucv_sock *iucv = NULL;
2276         struct sk_buff_head *list;
2277         struct sk_buff *list_skb;
2278         struct sk_buff *nskb;
2279         unsigned long flags;
2280
2281         read_lock_irqsave(&iucv_sk_list.lock, flags);
2282         sk_for_each(sk, &iucv_sk_list.head)
2283                 if (sk == isk) {
2284                         iucv = iucv_sk(sk);
2285                         break;
2286                 }
2287         read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2288
2289         if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2290                 return;
2291
2292         list = &iucv->send_skb_q;
2293         spin_lock_irqsave(&list->lock, flags);
2294         skb_queue_walk_safe(list, list_skb, nskb) {
2295                 if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2296                         switch (n) {
2297                         case TX_NOTIFY_OK:
2298                                 __skb_unlink(list_skb, list);
2299                                 kfree_skb(list_skb);
2300                                 iucv_sock_wake_msglim(sk);
2301                                 break;
2302                         case TX_NOTIFY_PENDING:
2303                                 atomic_inc(&iucv->pendings);
2304                                 break;
2305                         case TX_NOTIFY_DELAYED_OK:
2306                                 __skb_unlink(list_skb, list);
2307                                 atomic_dec(&iucv->pendings);
2308                                 if (atomic_read(&iucv->pendings) <= 0)
2309                                         iucv_sock_wake_msglim(sk);
2310                                 kfree_skb(list_skb);
2311                                 break;
2312                         case TX_NOTIFY_UNREACHABLE:
2313                         case TX_NOTIFY_DELAYED_UNREACHABLE:
2314                         case TX_NOTIFY_TPQFULL: /* not yet used */
2315                         case TX_NOTIFY_GENERALERROR:
2316                         case TX_NOTIFY_DELAYED_GENERALERROR:
2317                                 __skb_unlink(list_skb, list);
2318                                 kfree_skb(list_skb);
2319                                 if (sk->sk_state == IUCV_CONNECTED) {
2320                                         sk->sk_state = IUCV_DISCONN;
2321                                         sk->sk_state_change(sk);
2322                                 }
2323                                 break;
2324                         }
2325                         break;
2326                 }
2327         }
2328         spin_unlock_irqrestore(&list->lock, flags);
2329
2330         if (sk->sk_state == IUCV_CLOSING) {
2331                 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2332                         sk->sk_state = IUCV_CLOSED;
2333                         sk->sk_state_change(sk);
2334                 }
2335         }
2336
2337 }
2338
2339 /*
2340  * afiucv_netdev_event: handle netdev notifier chain events
2341  */
2342 static int afiucv_netdev_event(struct notifier_block *this,
2343                                unsigned long event, void *ptr)
2344 {
2345         struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2346         struct sock *sk;
2347         struct iucv_sock *iucv;
2348
2349         switch (event) {
2350         case NETDEV_REBOOT:
2351         case NETDEV_GOING_DOWN:
2352                 sk_for_each(sk, &iucv_sk_list.head) {
2353                         iucv = iucv_sk(sk);
2354                         if ((iucv->hs_dev == event_dev) &&
2355                             (sk->sk_state == IUCV_CONNECTED)) {
2356                                 if (event == NETDEV_GOING_DOWN)
2357                                         iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2358                                 sk->sk_state = IUCV_DISCONN;
2359                                 sk->sk_state_change(sk);
2360                         }
2361                 }
2362                 break;
2363         case NETDEV_DOWN:
2364         case NETDEV_UNREGISTER:
2365         default:
2366                 break;
2367         }
2368         return NOTIFY_DONE;
2369 }
2370
2371 static struct notifier_block afiucv_netdev_notifier = {
2372         .notifier_call = afiucv_netdev_event,
2373 };
2374
2375 static const struct proto_ops iucv_sock_ops = {
2376         .family         = PF_IUCV,
2377         .owner          = THIS_MODULE,
2378         .release        = iucv_sock_release,
2379         .bind           = iucv_sock_bind,
2380         .connect        = iucv_sock_connect,
2381         .listen         = iucv_sock_listen,
2382         .accept         = iucv_sock_accept,
2383         .getname        = iucv_sock_getname,
2384         .sendmsg        = iucv_sock_sendmsg,
2385         .recvmsg        = iucv_sock_recvmsg,
2386         .poll           = iucv_sock_poll,
2387         .ioctl          = sock_no_ioctl,
2388         .mmap           = sock_no_mmap,
2389         .socketpair     = sock_no_socketpair,
2390         .shutdown       = iucv_sock_shutdown,
2391         .setsockopt     = iucv_sock_setsockopt,
2392         .getsockopt     = iucv_sock_getsockopt,
2393 };
2394
2395 static const struct net_proto_family iucv_sock_family_ops = {
2396         .family = AF_IUCV,
2397         .owner  = THIS_MODULE,
2398         .create = iucv_sock_create,
2399 };
2400
2401 static struct packet_type iucv_packet_type = {
2402         .type = cpu_to_be16(ETH_P_AF_IUCV),
2403         .func = afiucv_hs_rcv,
2404 };
2405
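     /* register with the IUCV base layer and set up the dummy af_iucv device */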
2406 static int afiucv_iucv_init(void)
2407 {
2408         int err;
2409
2410         err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2411         if (err)
2412                 goto out;
2413         /* establish dummy device */
2414         af_iucv_driver.bus = pr_iucv->bus;
2415         err = driver_register(&af_iucv_driver);
2416         if (err)
2417                 goto out_iucv;
2418         af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2419         if (!af_iucv_dev) {
2420                 err = -ENOMEM;
2421                 goto out_driver;
2422         }
2423         dev_set_name(af_iucv_dev, "af_iucv");
2424         af_iucv_dev->bus = pr_iucv->bus;
2425         af_iucv_dev->parent = pr_iucv->root;
2426         af_iucv_dev->release = (void (*)(struct device *))kfree;
2427         af_iucv_dev->driver = &af_iucv_driver;
2428         err = device_register(af_iucv_dev);
2429         if (err)
2430                 goto out_iucv_dev;
2431         return 0;
2432
2433 out_iucv_dev:
2434         put_device(af_iucv_dev);
2435 out_driver:
2436         driver_unregister(&af_iucv_driver);
2437 out_iucv:
2438         pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2439 out:
2440         return err;
2441 }
2442
2443 static void afiucv_iucv_exit(void)
2444 {
2445         device_unregister(af_iucv_dev);
2446         driver_unregister(&af_iucv_driver);
2447         pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2448 }
2449
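     /* module init: query the z/VM user ID, bind to the IUCV interface if
      * available, and register the protocol, socket family, netdev notifier
      * and packet handler
      */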
2450 static int __init afiucv_init(void)
2451 {
2452         int err;
2453
2454         if (MACHINE_IS_VM) {
2455                 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2456                 if (unlikely(err)) {
2457                         WARN_ON(err);
2458                         err = -EPROTONOSUPPORT;
2459                         goto out;
2460                 }
2461
2462                 pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2463                 if (!pr_iucv) {
2464                         printk(KERN_WARNING "iucv_if lookup failed\n");
2465                         memset(&iucv_userid, 0, sizeof(iucv_userid));
2466                 }
2467         } else {
2468                 memset(&iucv_userid, 0, sizeof(iucv_userid));
2469                 pr_iucv = NULL;
2470         }
2471
2472         err = proto_register(&iucv_proto, 0);
2473         if (err)
2474                 goto out;
2475         err = sock_register(&iucv_sock_family_ops);
2476         if (err)
2477                 goto out_proto;
2478
2479         if (pr_iucv) {
2480                 err = afiucv_iucv_init();
2481                 if (err)
2482                         goto out_sock;
2483         }
2484
2485         err = register_netdevice_notifier(&afiucv_netdev_notifier);
2486         if (err)
2487                 goto out_notifier;
2488
2489         dev_add_pack(&iucv_packet_type);
2490         return 0;
2491
2492 out_notifier:
2493         if (pr_iucv)
2494                 afiucv_iucv_exit();
2495 out_sock:
2496         sock_unregister(PF_IUCV);
2497 out_proto:
2498         proto_unregister(&iucv_proto);
2499 out:
2500         if (pr_iucv)
2501                 symbol_put(iucv_if);
2502         return err;
2503 }
2504
2505 static void __exit afiucv_exit(void)
2506 {
2507         if (pr_iucv) {
2508                 afiucv_iucv_exit();
2509                 symbol_put(iucv_if);
2510         }
2511
2512         unregister_netdevice_notifier(&afiucv_netdev_notifier);
2513         dev_remove_pack(&iucv_packet_type);
2514         sock_unregister(PF_IUCV);
2515         proto_unregister(&iucv_proto);
2516 }
2517
2518 module_init(afiucv_init);
2519 module_exit(afiucv_exit);
2520
2521 MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
2522 MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2523 MODULE_VERSION(VERSION);
2524 MODULE_LICENSE("GPL");
2525 MODULE_ALIAS_NETPROTO(PF_IUCV);