/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

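/*
 * Overview: each listening service socket keeps a struct rxrpc_backlog (see
 * ar-internal.h) holding three parallel rings of preallocated objects -
 * peers, connections and calls - each RXRPC_BACKLOG_MAX (a power of two)
 * slots deep.  Process context refills the rings at the head; softirq
 * context consumes from the tail when a packet for a new call arrives, so
 * the hot path never has to allocate.
 */
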
/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                                      struct rxrpc_backlog *b,
                                      rxrpc_notify_rx_t notify_rx,
                                      rxrpc_user_attach_call_t user_attach_call,
                                      unsigned long user_call_ID, gfp_t gfp)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_call *call;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
        int max, tmp;
        unsigned int size = RXRPC_BACKLOG_MAX;
        unsigned int head, tail, call_head, call_tail;

        max = rx->sk.sk_max_ack_backlog;
        tmp = rx->sk.sk_ack_backlog;
        if (tmp >= max) {
                _leave(" = -ENOBUFS [full %u]", max);
                return -ENOBUFS;
        }
        max -= tmp;

        /* We don't need more conns and peers than we have calls, but on the
         * other hand, we shouldn't ever use more peers than conns or conns
         * than calls.
         */
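        /* CIRC_CNT(head, tail, size), from <linux/circ_buf.h>, is
         * ((head - tail) & (size - 1)): the number of occupied slots in a
         * power-of-two ring.  For example, with size 32, head 3 and tail 30,
         * (3 - 30) & 31 = 5 entries are queued.
         */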
        call_head = b->call_backlog_head;
        call_tail = READ_ONCE(b->call_backlog_tail);
        tmp = CIRC_CNT(call_head, call_tail, size);
        if (tmp >= max) {
                _leave(" = -ENOBUFS [enough %u]", tmp);
                return -ENOBUFS;
        }
        max = tmp + 1;

        head = b->peer_backlog_head;
        tail = READ_ONCE(b->peer_backlog_tail);
        if (CIRC_CNT(head, tail, size) < max) {
                struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
                if (!peer)
                        return -ENOMEM;
                b->peer_backlog[head] = peer;
                smp_store_release(&b->peer_backlog_head,
                                  (head + 1) & (size - 1));
        }
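        /* Each smp_store_release() on a ring head pairs with the
         * smp_load_acquire() in rxrpc_alloc_incoming_call(): the consumer
         * can only observe the advanced head after the new slot pointer is
         * visible.
         */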

        head = b->conn_backlog_head;
        tail = READ_ONCE(b->conn_backlog_tail);
        if (CIRC_CNT(head, tail, size) < max) {
                struct rxrpc_connection *conn;

                conn = rxrpc_prealloc_service_connection(rxnet, gfp);
                if (!conn)
                        return -ENOMEM;
                b->conn_backlog[head] = conn;
                smp_store_release(&b->conn_backlog_head,
                                  (head + 1) & (size - 1));

                trace_rxrpc_conn(conn, rxrpc_conn_new_service,
                                 atomic_read(&conn->usage), here);
        }

        /* Now it gets complicated, because calls get registered with the
         * socket here, particularly if a user ID is preassigned by the user.
         */
        call = rxrpc_alloc_call(gfp);
        if (!call)
                return -ENOMEM;
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
        call->state = RXRPC_CALL_SERVER_PREALLOC;

        trace_rxrpc_call(call, rxrpc_call_new_service,
                         atomic_read(&call->usage),
                         here, (const void *)user_call_ID);

        write_lock(&rx->call_lock);
        if (user_attach_call) {
                struct rxrpc_call *xcall;
                struct rb_node *parent, **pp;

                /* Check the user ID isn't already in use */
                pp = &rx->calls.rb_node;
                parent = NULL;
                while (*pp) {
                        parent = *pp;
                        xcall = rb_entry(parent, struct rxrpc_call, sock_node);
                        if (user_call_ID < xcall->user_call_ID)
                                pp = &(*pp)->rb_left;
                        else if (user_call_ID > xcall->user_call_ID)
                                pp = &(*pp)->rb_right;
                        else
                                goto id_in_use;
                }

                call->user_call_ID = user_call_ID;
                call->notify_rx = notify_rx;
                rxrpc_get_call(call, rxrpc_call_got_kernel);
                user_attach_call(call, user_call_ID);
                rxrpc_get_call(call, rxrpc_call_got_userid);
                rb_link_node(&call->sock_node, parent, pp);
                rb_insert_color(&call->sock_node, &rx->calls);
                set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        }
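        /* A kernel-service call now carries three refs: the ref from
         * rxrpc_alloc_call() that the backlog ring will retain, plus the
         * rxrpc_call_got_kernel and rxrpc_call_got_userid refs taken above
         * (see the comment on rxrpc_new_incoming_call()).
         */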

        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

        write_lock(&rxnet->call_lock);
        list_add_tail(&call->link, &rxnet->calls);
        write_unlock(&rxnet->call_lock);

        b->call_backlog[call_head] = call;
        smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
        _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
        return 0;

id_in_use:
        write_unlock(&rx->call_lock);
        rxrpc_cleanup_call(call);
        _leave(" = -EBADSLT");
        return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
        struct rxrpc_backlog *b = rx->backlog;

        if (!b) {
                b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
                if (!b)
                        return -ENOMEM;
                rx->backlog = b;
        }

        if (rx->discard_new_call)
                return 0;

        while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
                ;

        return 0;
}
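
/*
 * For a userspace service, charging is driven from the listen path.  A
 * minimal sketch of a server reaching this function (illustrative only;
 * SERVICE_ID and the rest of the address setup stand in for real values,
 * error handling elided):
 *
 *      struct sockaddr_rxrpc srx = {
 *              .srx_family     = AF_RXRPC,
 *              .srx_service    = SERVICE_ID,
 *              .transport_type = SOCK_DGRAM,
 *              ...
 *      };
 *      int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 *
 *      bind(fd, (struct sockaddr *)&srx, sizeof(srx));
 *      listen(fd, 100);
 */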

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
        struct rxrpc_backlog *b = rx->backlog;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
        unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

        if (!b)
                return;
        rx->backlog = NULL;

        /* Make sure that there aren't any incoming calls in progress before
         * we clear the preallocation buffers.  Taking and immediately
         * dropping incoming_lock waits out any rxrpc_new_incoming_call()
         * that began before rx->backlog was cleared above.
         */
        spin_lock_bh(&rx->incoming_lock);
        spin_unlock_bh(&rx->incoming_lock);

        head = b->peer_backlog_head;
        tail = b->peer_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_peer *peer = b->peer_backlog[tail];
                kfree(peer);
                tail = (tail + 1) & (size - 1);
        }

        head = b->conn_backlog_head;
        tail = b->conn_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_connection *conn = b->conn_backlog[tail];
                write_lock(&rxnet->conn_lock);
                list_del(&conn->link);
                list_del(&conn->proc_link);
                write_unlock(&rxnet->conn_lock);
                kfree(conn);
                tail = (tail + 1) & (size - 1);
        }

        head = b->call_backlog_head;
        tail = b->call_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_call *call = b->call_backlog[tail];
                if (rx->discard_new_call) {
                        _debug("discard %lx", call->user_call_ID);
                        rx->discard_new_call(call, call->user_call_ID);
                        rxrpc_put_call(call, rxrpc_call_put_kernel);
                }
                rxrpc_call_completed(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
                tail = (tail + 1) & (size - 1);
        }

        kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
                                                    struct rxrpc_connection *conn,
                                                    struct sk_buff *skb)
{
        struct rxrpc_backlog *b = rx->backlog;
        struct rxrpc_peer *peer, *xpeer;
        struct rxrpc_call *call;
        unsigned short call_head, conn_head, peer_head;
        unsigned short call_tail, conn_tail, peer_tail;
        unsigned short call_count, conn_count;

        /* The backlog must always hold at least as many conns as calls and
         * at least as many peers as conns (each new call consumes at most
         * one of each); the assertions below check this.
         */
        call_head = smp_load_acquire(&b->call_backlog_head);
        call_tail = b->call_backlog_tail;
        call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
        conn_head = smp_load_acquire(&b->conn_backlog_head);
        conn_tail = b->conn_backlog_tail;
        conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
        ASSERTCMP(conn_count, >=, call_count);
        peer_head = smp_load_acquire(&b->peer_backlog_head);
        peer_tail = b->peer_backlog_tail;
        ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
                  conn_count);

        if (call_count == 0)
                return NULL;

        if (!conn) {
                /* No connection.  We're going to need a peer to start off
                 * with.  If one doesn't yet exist, use a spare from the
                 * preallocation set.  We dump the address into the spare in
                 * anticipation - and to save on stack space.
                 */
                xpeer = b->peer_backlog[peer_tail];
                if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
                        return NULL;

                peer = rxrpc_lookup_incoming_peer(local, xpeer);
                if (peer == xpeer) {
                        b->peer_backlog[peer_tail] = NULL;
                        smp_store_release(&b->peer_backlog_tail,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));
                }

                /* Now allocate and set up the connection */
                conn = b->conn_backlog[conn_tail];
                b->conn_backlog[conn_tail] = NULL;
                smp_store_release(&b->conn_backlog_tail,
                                  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
                rxrpc_get_local(local);
                conn->params.local = local;
                conn->params.peer = peer;
                rxrpc_see_connection(conn);
                rxrpc_new_incoming_connection(rx, conn, skb);
        } else {
                rxrpc_get_connection(conn);
        }

        /* And now we can allocate and set up a new call */
        call = b->call_backlog[call_tail];
        b->call_backlog[call_tail] = NULL;
        smp_store_release(&b->call_backlog_tail,
                          (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

        rxrpc_see_call(call);
        call->conn = conn;
        call->peer = rxrpc_get_peer(conn->params.peer);
        call->cong_cwnd = call->peer->cong_cwnd;
        return call;
}
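
/*
 * Ring ordering, consumer side: each slot is cleared before the
 * corresponding tail is advanced with smp_store_release(), pairing with the
 * READ_ONCE() of the tails in rxrpc_service_prealloc_one() so that the
 * producer only treats a slot as free once the consumer is done with it.
 */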

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                                           struct rxrpc_connection *conn,
                                           struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_sock *rx;
        struct rxrpc_call *call;
        u16 service_id = sp->hdr.serviceId;

        _enter("");

        /* Get the socket providing the service */
        rx = rcu_dereference(local->service);
        if (rx && (service_id == rx->srx.srx_service ||
                   service_id == rx->second_service))
                goto found_service;

        trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
                          RX_INVALID_OPERATION, EOPNOTSUPP);
        skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
        skb->priority = RX_INVALID_OPERATION;
        _leave(" = NULL [service]");
        return NULL;

found_service:
        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
            rx->sk.sk_state == RXRPC_CLOSE) {
                trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
                skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
                skb->priority = RX_INVALID_OPERATION;
                _leave(" = NULL [close]");
                call = NULL;
                goto out;
        }

        call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
        if (!call) {
                skb->mark = RXRPC_SKB_MARK_BUSY;
                _leave(" = NULL [busy]");
                call = NULL;
                goto out;
        }

        trace_rxrpc_receive(call, rxrpc_receive_incoming,
                            sp->hdr.serial, sp->hdr.seq);

        /* Lock the call to prevent rxrpc_kernel_send/recv_data() and
         * sendmsg()/recvmsg() inconveniently stealing the mutex once the
         * notification is generated.
         *
         * The BUG should never happen because the kernel should be well
         * behaved enough not to access the call before the first notification
         * event and userspace is prevented from doing so until the state is
         * appropriate.
         */
        if (!mutex_trylock(&call->user_mutex))
                BUG();

        /* Make the call live. */
        rxrpc_incoming_call(rx, call, skb);
        conn = call->conn;

        if (rx->notify_new_call)
                rx->notify_new_call(&rx->sk, call, call->user_call_ID);
        else
                sk_acceptq_added(&rx->sk);

        spin_lock(&conn->state_lock);
        switch (conn->state) {
        case RXRPC_CONN_SERVICE_UNSECURED:
                conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
                set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
                rxrpc_queue_conn(call->conn);
                break;

        case RXRPC_CONN_SERVICE:
                write_lock(&call->state_lock);
                if (rx->discard_new_call)
                        call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                else
                        call->state = RXRPC_CALL_SERVER_ACCEPTING;
                write_unlock(&call->state_lock);
                break;

        case RXRPC_CONN_REMOTELY_ABORTED:
                rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
                                          conn->remote_abort, -ECONNABORTED);
                break;
        case RXRPC_CONN_LOCALLY_ABORTED:
                rxrpc_abort_call("CON", call, sp->hdr.seq,
                                 conn->local_abort, -ECONNABORTED);
                break;
        default:
                BUG();
        }
        spin_unlock(&conn->state_lock);

        if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
                rxrpc_notify_socket(call);

        /* We have to discard the prealloc queue's ref here and rely on a
         * combination of the RCU read lock and refs held either by the socket
         * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
         * service to prevent the call from being deallocated too early.
         */
        rxrpc_put_call(call, rxrpc_call_put);

        _leave(" = %p{%d}", call, call->debug_id);
out:
        spin_unlock(&rx->incoming_lock);
        return call;
}

/*
 * Handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
                                     unsigned long user_call_ID,
                                     rxrpc_notify_rx_t notify_rx)
        __releases(&rx->sk.sk_lock.slock)
{
        struct rxrpc_call *call;
        struct rb_node *parent, **pp;
        int ret;

        _enter(",%lx", user_call_ID);

        ASSERT(!irqs_disabled());

        write_lock(&rx->call_lock);

        if (list_empty(&rx->to_be_accepted)) {
                write_unlock(&rx->call_lock);
                release_sock(&rx->sk);
                kleave(" = -ENODATA [empty]");
                return ERR_PTR(-ENODATA);
        }

        /* check the user ID isn't already in use */
        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                call = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto id_in_use;
        }

        /* Dequeue the first call and check it's still valid.  We gain
         * responsibility for the queue's reference.
         */
        call = list_entry(rx->to_be_accepted.next,
                          struct rxrpc_call, accept_link);
        write_unlock(&rx->call_lock);

        /* We need to gain the mutex from the interrupt handler without
         * upsetting lockdep, so we have to release it there and take it here.
         * We are, however, still holding the socket lock, so other accepts
         * must wait for us and no one can add the user ID behind our backs.
         */
        if (mutex_lock_interruptible(&call->user_mutex) < 0) {
                release_sock(&rx->sk);
                kleave(" = -ERESTARTSYS");
                return ERR_PTR(-ERESTARTSYS);
        }

        write_lock(&rx->call_lock);
        list_del_init(&call->accept_link);
        sk_acceptq_removed(&rx->sk);
        rxrpc_see_call(call);

        /* Find the user ID insertion point. */
        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                struct rxrpc_call *xcall;

                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        BUG();
        }

        write_lock_bh(&call->state_lock);
        switch (call->state) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                break;
        case RXRPC_CALL_COMPLETE:
                ret = call->error;
                goto out_release;
        default:
                BUG();
        }

        /* formalise the acceptance */
        call->notify_rx = notify_rx;
        call->user_call_ID = user_call_ID;
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
                BUG();

        write_unlock_bh(&call->state_lock);
        write_unlock(&rx->call_lock);
        rxrpc_notify_socket(call);
        rxrpc_service_prealloc(rx, GFP_KERNEL);
        release_sock(&rx->sk);
        _leave(" = %p{%d}", call, call->debug_id);
        return call;

out_release:
        _debug("release %p", call);
        write_unlock_bh(&call->state_lock);
        write_unlock(&rx->call_lock);
        rxrpc_release_call(rx, call);
        rxrpc_put_call(call, rxrpc_call_put);
        goto out;

id_in_use:
        ret = -EBADSLT;
        write_unlock(&rx->call_lock);
out:
        rxrpc_service_prealloc(rx, GFP_KERNEL);
        release_sock(&rx->sk);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}
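
/*
 * Userspace reaches rxrpc_accept_call() via sendmsg() carrying an
 * RXRPC_ACCEPT command and the chosen user call ID in the control data.  A
 * rough sketch (illustrative only; ctrl sizing and error handling elided,
 * my_id is the caller's chosen tag):
 *
 *      struct msghdr msg = { .msg_control = ctrl, .msg_controllen = ... };
 *      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *      cmsg->cmsg_level = SOL_RXRPC;
 *      cmsg->cmsg_type = RXRPC_USER_CALL_ID;
 *      cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned long));
 *      memcpy(CMSG_DATA(cmsg), &my_id, sizeof(my_id));
 *
 *      cmsg = CMSG_NXTHDR(&msg, cmsg);
 *      cmsg->cmsg_level = SOL_RXRPC;
 *      cmsg->cmsg_type = RXRPC_ACCEPT;
 *      cmsg->cmsg_len = CMSG_LEN(0);
 *
 *      sendmsg(fd, &msg, 0);
 */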

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;
        bool abort = false;
        int ret;

        _enter("");

        ASSERT(!irqs_disabled());

        write_lock(&rx->call_lock);

        if (list_empty(&rx->to_be_accepted)) {
                write_unlock(&rx->call_lock);
                return -ENODATA;
        }

        /* Dequeue the first call and check it's still valid.  We gain
         * responsibility for the queue's reference.
         */
        call = list_entry(rx->to_be_accepted.next,
                          struct rxrpc_call, accept_link);
        list_del_init(&call->accept_link);
        sk_acceptq_removed(&rx->sk);
        rxrpc_see_call(call);

        write_lock_bh(&call->state_lock);
        switch (call->state) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
                abort = true;
                /* fall through */
        case RXRPC_CALL_COMPLETE:
                ret = call->error;
                goto out_discard;
        default:
                BUG();
        }

out_discard:
        write_unlock_bh(&call->state_lock);
        write_unlock(&rx->call_lock);
        if (abort) {
                rxrpc_send_abort_packet(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
        }
        rxrpc_service_prealloc(rx, GFP_KERNEL);
        _leave(" = %d", ret);
        return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
                               rxrpc_notify_rx_t notify_rx,
                               rxrpc_user_attach_call_t user_attach_call,
                               unsigned long user_call_ID, gfp_t gfp)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct rxrpc_backlog *b = rx->backlog;

        if (sock->sk->sk_state == RXRPC_CLOSE)
                return -ESHUTDOWN;

        return rxrpc_service_prealloc_one(rx, b, notify_rx,
                                          user_attach_call, user_call_ID,
                                          gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
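
/*
 * A kernel service would typically keep the backlog topped up by calling
 * this once per call it accepts.  A loose sketch (my_socket, my_notify_rx,
 * my_attach, my_alloc_ctx and my_free_ctx are hypothetical service-side
 * names):
 *
 *      static void my_charge_preallocation(struct socket *my_socket)
 *      {
 *              for (;;) {
 *                      struct my_ctx *ctx = my_alloc_ctx();
 *
 *                      if (!ctx)
 *                              break;
 *                      if (rxrpc_kernel_charge_accept(my_socket,
 *                                                     my_notify_rx, my_attach,
 *                                                     (unsigned long)ctx,
 *                                                     GFP_KERNEL) < 0) {
 *                              my_free_ctx(ctx);
 *                              break;
 *                      }
 *              }
 *      }
 */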