/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]              = "Uninit  ",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]          = "ClWtConn",
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
        [RXRPC_CALL_SERVER_PREALLOC]            = "SvPrealc",
        [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]           = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]                   = "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
        [RXRPC_CALL_SUCCEEDED]                  = "Complete",
        [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
        [RXRPC_CALL_LOCAL_ERROR]                = "LocError",
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
};

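/* Illustrative only: these fixed-width (8-char) state names exist for
 * diagnostic output.  For example, the /proc/net/rxrpc/calls dumper indexes
 * the table with a call's current state, roughly:
 *
 *        seq_printf(seq, "... %-8.8s ...", rxrpc_call_states[call->state]);
 */
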
struct kmem_cache *rxrpc_call_jar;

static void rxrpc_call_timer_expired(struct timer_list *t)
{
        struct rxrpc_call *call = from_timer(call, t, timer);

        _enter("%d", call->debug_id);

        if (call->state < RXRPC_CALL_COMPLETE) {
                trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
                rxrpc_queue_call(call);
        }
}

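/* Note: from_timer() is a container_of() wrapper, so the handler above
 * recovers the owning call from its embedded timer_list, equivalent to:
 *
 *        call = container_of(t, struct rxrpc_call, timer);
 */
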
static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
        rxrpc_get_call(call, rxrpc_call_got);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}

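/* A minimal usage sketch (assumption: this mirrors the sendmsg() lookup
 * path, which resolves a user-supplied call ID to a referenced call; the
 * errno shown is the one conventionally used for a bad call ID):
 *
 *        call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *        if (!call)
 *                return -EBADSLT;
 *        ...
 *        rxrpc_put_call(call, rxrpc_call_put);
 *
 * The ref taken under call_lock must be balanced by rxrpc_put_call().
 */
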
/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
                                    unsigned int debug_id)
{
        struct rxrpc_call *call;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
                                    sizeof(struct sk_buff *),
                                    gfp);
        if (!call->rxtx_buffer)
                goto nomem;

        call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
        if (!call->rxtx_annotations)
                goto nomem_2;

        mutex_init(&call->user_mutex);

        /* Prevent lockdep reporting a deadlock false positive between the afs
         * filesystem and sys_sendmsg() via the mmap sem.
         */
        if (rx->sk.sk_kern_sock)
                lockdep_set_class(&call->user_mutex,
                                  &rxrpc_call_user_mutex_lock_class_key);

        timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->chan_wait_link);
        INIT_LIST_HEAD(&call->accept_link);
        INIT_LIST_HEAD(&call->recvmsg_link);
        INIT_LIST_HEAD(&call->sock_link);
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->lock);
        spin_lock_init(&call->notify_lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = debug_id;
        call->tx_total_len = -1;
        call->next_rx_timo = 20 * HZ;
        call->next_req_timo = 1 * HZ;

        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        /* Leave space in the ring to handle a maxed-out jumbo packet */
        call->rx_winsize = rxrpc_rx_window_size;
        call->tx_winsize = 16;
        call->rx_expect_next = 1;

        call->cong_cwnd = 2;
        call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;

        call->rxnet = rxnet;
        atomic_inc(&rxnet->nr_calls);
        return call;

nomem_2:
        kfree(call->rxtx_buffer);
nomem:
        kmem_cache_free(rxrpc_call_jar, call);
        return NULL;
}
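
/* Note on the two rings allocated above: rxtx_buffer holds
 * RXRPC_RXTX_BUFF_SIZE skb pointers and rxtx_annotations one status byte per
 * slot.  The size is a power of two, so a packet sequence number maps to a
 * ring slot by masking (assumption: via RXRPC_RXTX_BUFF_MASK, as done by the
 * I/O paths in this tree):
 *
 *        ix = seq & RXRPC_RXTX_BUFF_MASK;
 */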

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
                                                  struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp,
                                                  unsigned int debug_id)
{
        struct rxrpc_call *call;
        ktime_t now;

        _enter("");

        call = rxrpc_alloc_call(rx, gfp, debug_id);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
        call->service_id = srx->srx_service;
        call->tx_phase = true;
        now = ktime_get_real();
        call->acks_latest_ts = now;
        call->cong_tstamp = now;

        _leave(" = %p", call);
        return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
        unsigned long now = jiffies;
        unsigned long j = now + MAX_JIFFY_OFFSET;

        call->ack_at = j;
        call->ack_lost_at = j;
        call->resend_at = j;
        call->ping_at = j;
        call->expect_rx_by = j;
        call->expect_req_by = j;
        call->expect_term_by = j;
        call->timer.expires = now;
}
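
/* Note: every timeout above starts at now + MAX_JIFFY_OFFSET, i.e. as far in
 * the future as jiffies arithmetic allows, so the timer is effectively
 * disarmed until an event pulls one of the timeouts forward (assumption:
 * via the timer-reduction helper used elsewhere in this tree, roughly):
 *
 *        rxrpc_reduce_call_timer(call, expire_at, now, rxrpc_timer_set_for_ack);
 */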

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's user_mutex will need releasing by the
 *   caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         struct rxrpc_call_params *p,
                                         gfp_t gfp,
                                         unsigned int debug_id)
        __releases(&rx->sk.sk_lock.slock)
        __acquires(&call->user_mutex)
{
        struct rxrpc_call *call, *xcall;
        struct rxrpc_net *rxnet;
        struct rb_node *parent, **pp;
        const void *here = __builtin_return_address(0);
        int ret;

        _enter("%p,%lx", rx, p->user_call_ID);

        call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
        if (IS_ERR(call)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        call->tx_total_len = p->tx_total_len;
        trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
                         here, (const void *)p->user_call_ID);

        /* We need to protect a partially set up call against the user as we
         * will be acting outside the socket lock.
         */
        mutex_lock(&call->user_mutex);

        /* Publish the call, even though it is incompletely set up as yet */
        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (p->user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (p->user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto error_dup_user_ID;
        }

        rcu_assign_pointer(call->socket, rx);
        call->user_call_ID = p->user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

        rxnet = call->rxnet;
        write_lock(&rxnet->call_lock);
        list_add_tail(&call->link, &rxnet->calls);
        write_unlock(&rxnet->call_lock);

        /* From this point on, the call is protected by its own lock. */
        release_sock(&rx->sk);

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
                         here, NULL);

        rxrpc_start_call_timer(call);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

        /* We unexpectedly found the user ID in the list after taking
         * the call_lock.  This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
error_dup_user_ID:
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
        ret = -EEXIST;

error:
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, ret);
        trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
                         here, ERR_PTR(ret));
        rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}
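
/* A minimal caller's-eye sketch (assumption: this mirrors how the kernel
 * service API drives the function above; the sequence is abridged):
 *
 *        lock_sock(&rx->sk);
 *        call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
 *        // the socket lock has been released by this point; on success the
 *        // call's user_mutex is held and must be dropped by the caller
 *        if (!IS_ERR(call))
 *                mutex_unlock(&call->user_mutex);
 */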

/*
 * Retry a call to a new address.  It is expected that the Tx queue of the call
 * will contain data previously packaged for an old call.
 */
int rxrpc_retry_client_call(struct rxrpc_sock *rx,
                            struct rxrpc_call *call,
                            struct rxrpc_conn_parameters *cp,
                            struct sockaddr_rxrpc *srx,
                            gfp_t gfp)
{
        const void *here = __builtin_return_address(0);
        int ret;

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
                         here, NULL);

        rxrpc_start_call_timer(call);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
                rxrpc_queue_call(call);

        _leave(" = 0");
        return 0;

error:
        rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                  RX_CALL_DEAD, ret);
        trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
                         here, ERR_PTR(ret));
        _leave(" = %d", ret);
        return ret;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
                         struct rxrpc_call *call,
                         struct sk_buff *skb)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        u32 chan;

        _enter(",%d", call->conn->debug_id);

        rcu_assign_pointer(call->socket, rx);
        call->call_id           = sp->hdr.callNumber;
        call->service_id        = sp->hdr.serviceId;
        call->cid               = sp->hdr.cid;
        call->state             = RXRPC_CALL_SERVER_ACCEPTING;
        if (sp->hdr.securityIndex > 0)
                call->state     = RXRPC_CALL_SERVER_SECURING;
        call->cong_tstamp       = skb->tstamp;

        /* Set the channel for this call.  We don't get channel_lock as we're
         * only defending against the data_ready handler (which we're called
         * from) and the RESPONSE packet parser (which is only really
         * interested in call_counter and can cope with a disagreement with the
         * call pointer).
         */
        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        conn->channels[chan].call_counter = call->call_id;
        conn->channels[chan].call_id = call->call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        rxrpc_start_call_timer(call);
        _leave("");
}
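
/* Note on the channel mask above: an rx connection multiplexes up to four
 * calls, with the channel number carried in the bottom two bits of the
 * packet's connection ID (assuming RXRPC_CHANNELMASK is 3 in this tree):
 *
 *        chan = sp->hdr.cid & RXRPC_CHANNELMASK;        // yields 0..3
 */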

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = __atomic_add_unless(&call->usage, 1, 0);
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_read(&call->usage);
        ASSERTCMP(n, >=, 1);
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}
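
/* Refcounting note: rxrpc_queue_call() conditionally takes a new ref -
 * __atomic_add_unless() returns the old count, so 0 means the call was
 * already dead and must not be queued - whereas __rxrpc_queue_call() donates
 * the ref its caller already holds.  In both, if the work item was already
 * queued, the surplus ref is dropped again via rxrpc_put_call().
 */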

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        if (call) {
                int n = atomic_read(&call->usage);

                trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
        }
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&call->usage);

        trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_connection *conn = call->conn;
        bool put = false;
        int i;

        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

        trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
                         here, (const void *)call->flags);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

        del_timer_sync(&call->timer);

        /* Make sure we don't get any more notifications */
        write_lock_bh(&rx->recvmsg_lock);

        if (!list_empty(&call->recvmsg_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                list_del(&call->recvmsg_link);
                put = true;
        }

        /* list_empty() must return false in rxrpc_notify_socket() */
        call->recvmsg_link.next = NULL;
        call->recvmsg_link.prev = NULL;

        write_unlock_bh(&rx->recvmsg_lock);
        if (put)
                rxrpc_put_call(call, rxrpc_call_put);

        write_lock(&rx->call_lock);

        if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                rxrpc_put_call(call, rxrpc_call_put_userid);
        }

        list_del(&call->sock_link);
        write_unlock(&rx->call_lock);

        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        if (conn)
                rxrpc_disconnect_call(call);

        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
                rxrpc_free_skb(call->rxtx_buffer[i],
                               (call->tx_phase ? rxrpc_skb_tx_cleaned :
                                rxrpc_skb_rx_cleaned));
                call->rxtx_buffer[i] = NULL;
        }

        _leave("");
}

/*
 * Prepare a kernel service call for retry.
 */
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int i;
        u8 last = 0;

        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

        trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
                         here, (const void *)call->flags);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
        ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
        ASSERT(list_empty(&call->recvmsg_link));

        del_timer_sync(&call->timer);

        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);

        if (call->conn)
                rxrpc_disconnect_call(call);

        if (rxrpc_is_service_call(call) ||
            !call->tx_phase ||
            call->tx_hard_ack != 0 ||
            call->rx_hard_ack != 0 ||
            call->rx_top != 0)
                return -EINVAL;

        call->state = RXRPC_CALL_UNINITIALISED;
        call->completion = RXRPC_CALL_SUCCEEDED;
        call->call_id = 0;
        call->cid = 0;
        call->cong_cwnd = 0;
        call->cong_extra = 0;
        call->cong_ssthresh = 0;
        call->cong_mode = 0;
        call->cong_dup_acks = 0;
        call->cong_cumul_acks = 0;
        call->acks_lowest_nak = 0;

        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
                last |= call->rxtx_annotations[i];
                call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
                call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
        }

        _leave(" = 0");
        return 0;
}
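
/* A minimal sketch of the expected retry sequence (assumption: this mirrors
 * the kernel-service retry API, which holds the call's user_mutex across
 * both steps):
 *
 *        mutex_lock(&call->user_mutex);
 *        ret = rxrpc_prepare_call_for_retry(rx, call);
 *        if (ret == 0)
 *                ret = rxrpc_retry_client_call(rx, call, &cp, srx, gfp);
 *        mutex_unlock(&call->user_mutex);
 */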

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;

        _enter("%p", rx);

        while (!list_empty(&rx->to_be_accepted)) {
                call = list_entry(rx->to_be_accepted.next,
                                  struct rxrpc_call, accept_link);
                list_del(&call->accept_link);
                rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        while (!list_empty(&rx->sock_calls)) {
                call = list_entry(rx->sock_calls.next,
                                  struct rxrpc_call, sock_link);
                rxrpc_get_call(call, rxrpc_call_got);
                rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_send_abort_packet(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        _leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        struct rxrpc_net *rxnet = call->rxnet;
        const void *here = __builtin_return_address(0);
        int n;

        ASSERT(call != NULL);

        n = atomic_dec_return(&call->usage);
        trace_rxrpc_call(call, op, n, here, NULL);
        ASSERTCMP(n, >=, 0);
        if (n == 0) {
                _debug("call %d dead", call->debug_id);
                ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

                if (!list_empty(&call->link)) {
                        write_lock(&rxnet->call_lock);
                        list_del_init(&call->link);
                        write_unlock(&rxnet->call_lock);
                }

                rxrpc_cleanup_call(call);
        }
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
        struct rxrpc_net *rxnet = call->rxnet;

        rxrpc_put_peer(call->peer);
        kfree(call->rxtx_buffer);
        kfree(call->rxtx_annotations);
        kmem_cache_free(rxrpc_call_jar, call);
        if (atomic_dec_and_test(&rxnet->nr_calls))
                wake_up_var(&rxnet->nr_calls);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        int i;

        _net("DESTROY CALL %d", call->debug_id);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->timer);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERTCMP(call->conn, ==, NULL);

        /* Clean up the Rx/Tx buffer */
        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
                rxrpc_free_skb(call->rxtx_buffer[i],
                               (call->tx_phase ? rxrpc_skb_tx_cleaned :
                                rxrpc_skb_rx_cleaned));

        rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);

        call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
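
/* Note: final freeing is deferred via call_rcu() because the packet-input
 * path looks calls up under rcu_read_lock() (e.g. through
 * conn->channels[].call, assigned with rcu_assign_pointer() above);
 * deferring the kmem_cache_free() ensures no RCU reader can still be
 * dereferencing the call when its memory is returned.
 */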

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
        struct rxrpc_call *call;

        _enter("");

        if (list_empty(&rxnet->calls))
                return;

        write_lock(&rxnet->call_lock);

        while (!list_empty(&rxnet->calls)) {
                call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
                _debug("Zapping call %p", call);

                rxrpc_see_call(call);
                list_del_init(&call->link);

                pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
                       call, atomic_read(&call->usage),
                       rxrpc_call_states[call->state],
                       call->flags, call->events);

                write_unlock(&rxnet->call_lock);
                cond_resched();
                write_lock(&rxnet->call_lock);
        }

        write_unlock(&rxnet->call_lock);

        atomic_dec(&rxnet->nr_calls);
        wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}