/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

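/*
 * Note (context, assumed rather than taken from this file): both tunables
 * above appear to be exported through sysctl (see net/rxrpc/sysctl.c), so
 * they can be adjusted at runtime, e.g. something like:
 *
 *	# echo 120 > /proc/sys/net/rxrpc/max_call_lifetime
 *
 * with the proc handler converting between seconds and jiffies.
 */
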
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant call with the given user ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

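/*
 * Illustrative usage sketch (not part of the original file): the lookup
 * above takes a ref on the call under rx->call_lock, so a caller must
 * balance it with rxrpc_put_call() when done:
 *
 *	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *	if (!call)
 *		return -EBADSLT;	(errno choice assumed from sendmsg path)
 *	... operate on the call ...
 *	rxrpc_put_call(call);
 */
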
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	/* The Tx ACK window is a circular buffer, so its size must be a
	 * power of two to suit the masking in rxrpc_cleanup_call().
	 */
	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	skb_queue_head_init(&call->knlrecv_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

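/*
 * Note on the timer wiring above (background, not from this file): this is
 * the pre-4.15 kernel timer API, where setup_timer() stores an unsigned
 * long context word and each expiry handler casts it back by hand, as the
 * handlers at the bottom of this file do:
 *
 *	static void rxrpc_call_life_expired(unsigned long _call)
 *	{
 *		struct rxrpc_call *call = (struct rxrpc_call *) _call;
 *		...
 *	}
 *
 * (Later kernels replace this pattern with timer_setup() and from_timer().)
 */
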
/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, 0, atomic_read(&call->usage), 0, here,
			 (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

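/*
 * Illustrative caller-side sketch (assumed, not from this file): user call
 * IDs are chosen by userspace, so a sendmsg-style caller should be prepared
 * for -EEXIST when an ID is still bound to a live call:
 *
 *	call = rxrpc_new_client_call(rx, &cp, &srx, user_call_ID, GFP_KERNEL);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);	(-EEXIST on a duplicate user ID)
 */
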
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	const void *here = __builtin_return_address(0);
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	trace_rxrpc_call(candidate, 1, atomic_read(&candidate->usage),
			 0, here, NULL);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket	= rx;
	candidate->conn		= conn;
	candidate->peer		= conn->params.peer;
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags	|= (1 << RXRPC_CALL_IS_SERVICE);
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state == RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

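/*
 * Worked example of the channel arithmetic above (per the rx wire protocol,
 * which multiplexes RXRPC_MAXCALLS == 4 calls over one connection, so
 * RXRPC_CHANNELMASK is 0x3):
 *
 *	cid  = 0x8001547b
 *	chan = cid & RXRPC_CHANNELMASK		-> 3
 *	conn part = cid & ~RXRPC_CHANNELMASK	-> 0x80015478
 */
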
/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);

	if (call) {
		int n = atomic_read(&call->usage);
		int m = atomic_read(&call->skb_count);

		trace_rxrpc_call(call, 2, n, m, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_read(&call->skb_count);

	trace_rxrpc_call(call, 3, n, m, here, NULL);
}

/*
 * Note the addition of a ref on a call for a socket buffer.
 */
void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_inc_return(&call->skb_count);

	trace_rxrpc_call(call, 4, n, m, here, skb);
}

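/*
 * Summary of the trace_rxrpc_call() op codes used in this file, inferred
 * from the call sites (this transitional API passes a bare int rather than
 * a named enum):
 *
 *	0 new client call	1 new service call	2 seen
 *	3 got ref		4 got ref for skb	5 put ref
 *	6 put ref for skb
 */
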
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	rxrpc_see_call(call);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		__rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

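/*
 * Lifecycle sketch for the handoff above (inferred from this file): the
 * socket's ref is not dropped here but consumed later by the deadspan
 * timer, which marks the call RXRPC_CALL_DEAD and puts the final ref:
 *
 *	rxrpc_release_call()  ->  deadspan fires after rxrpc_dead_call_expiry
 *	                      ->  rxrpc_dead_call_expired(): state = DEAD,
 *	                          rxrpc_put_call() drops the socket's ref
 */
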
/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched = false;	/* don't read sched uninitialised if already dead */

	rxrpc_see_call(call);
	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = __rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
	}
	write_unlock(&call->state_lock);
	if (sched)
		rxrpc_queue_call(call);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	m = atomic_read(&call->skb_count);
	trace_rxrpc_call(call, 5, n, m, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(m != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

/*
 * Release a call ref held by a socket buffer.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	n = atomic_dec_return(&call->usage);
	m = atomic_dec_return(&call->skb_count);
	trace_rxrpc_call(call, 6, n, m, here, skb);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(m != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_purge_queue(&call->knlrecv_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			/* The bottom bit of a window slot is used as a flag,
			 * so mask it off to recover the skb pointer.
			 */
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	rxrpc_purge_queue(&call->knlrecv_queue);
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

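/*
 * Note on the ring-buffer drain above (background): CIRC_CNT() from
 * <linux/circ_buf.h> counts occupied slots and, like the tail-advance mask,
 * relies on the window size being a power of two:
 *
 *	CIRC_CNT(head, tail, size) == ((head) - (tail)) & ((size) - 1)
 */
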
/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%x,%p}",
	       call, atomic_read(&call->usage), call->cid, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through */
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

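/*
 * Design note on the loop above (inferred): the lock is dropped around
 * cond_resched() to bound hold time during a long drain; because each call
 * is unlinked with list_del_init() before the drop, restarting from the
 * list head on each iteration is safe.
 */
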
/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
	rxrpc_queue_call(call);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
}