/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>

#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED] = "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK] = "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING] = "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK] = "SvAwtACK",
	[RXRPC_CALL_COMPLETE] = "Complete",
	[RXRPC_CALL_DEAD] = "Dead    ",
};
const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED] = "Complete",
	[RXRPC_CALL_SERVER_BUSY] = "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED] = "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED] = "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR] = "LocError",
	[RXRPC_CALL_NETWORK_ERROR] = "NetError",
};
const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
	[rxrpc_call_new_client] = "NWc",
	[rxrpc_call_new_service] = "NWs",
	[rxrpc_call_queued] = "QUE",
	[rxrpc_call_queued_ref] = "QUR",
	[rxrpc_call_seen] = "SEE",
	[rxrpc_call_got] = "GOT",
	[rxrpc_call_got_skb] = "Gsk",
	[rxrpc_call_got_userid] = "Gus",
	[rxrpc_call_put] = "PUT",
	[rxrpc_call_put_skb] = "Psk",
	[rxrpc_call_put_userid] = "Pus",
	[rxrpc_call_put_noqueue] = "PNQ",
};
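/*
 * The three-letter codes above annotate the rxrpc_call tracepoint output,
 * identifying which get/put/queue operation changed a call's refcounts.
 */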
struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);
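/*
 * A call object is created by rxrpc_alloc_call(), published on the socket's
 * calls rbtree and the global rxrpc_calls list, and later detached by
 * rxrpc_release_call().  After a grace period governed by the deadspan timer
 * it is marked RXRPC_CALL_DEAD and finally torn down by rxrpc_destroy_call()
 * and freed under RCU.
 */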
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	skb_queue_head_init(&call->knlrecv_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}
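/*
 * Each call carries four timers: lifetimer bounds the overall call duration
 * (rxrpc_max_call_lifetime), ack_timer and resend_timer drive deferred ACK
 * and retransmission events, and deadspan delays final reaping of a released
 * call by rxrpc_dead_call_expiry jiffies.
 */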
/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;

	_leave(" = %p", call);
	return call;
}
/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}
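/*
 * The lifetimer is armed here for client calls; for service calls it is
 * armed at the end of rxrpc_incoming_call() once the call has been bound to
 * a connection channel.
 */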
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, 0, atomic_read(&call->usage), 0, here,
			 (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call, rxrpc_call_put_userid);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}
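/*
 * Incoming calls arrive on one of a connection's channels; the channel is
 * taken from the low bits of the packet's connection ID (RXRPC_CHANNELMASK)
 * and the channel's call counter is used to reject packets belonging to
 * superseded calls.
 */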
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	const void *here = __builtin_return_address(0);
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	trace_rxrpc_call(candidate, rxrpc_call_new_service,
			 atomic_read(&candidate->usage), 0, here, NULL);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket = rx;
	candidate->conn = conn;
	candidate->peer = conn->params.peer;
	candidate->cid = sp->hdr.cid;
	candidate->call_id = sp->hdr.callNumber;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call, rxrpc_call_got);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state == RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
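/*
 * Call references are tracked in two counters: usage is the overall refcount
 * and skb_count counts refs held on behalf of socket buffers.  Every change
 * is logged via the rxrpc_call tracepoint together with the caller's return
 * address so that leaked references can be attributed.
 */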
/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);

	if (call) {
		int n = atomic_read(&call->usage);
		int m = atomic_read(&call->skb_count);

		trace_rxrpc_call(call, rxrpc_call_seen, n, m, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_read(&call->skb_count);

	trace_rxrpc_call(call, op, n, m, here, NULL);
}

/*
 * Note the addition of a ref on a call for a socket buffer.
 */
void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_inc_return(&call->skb_count);

	trace_rxrpc_call(call, rxrpc_call_got_skb, n, m, here, skb);
}
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	rxrpc_see_call(call);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		__rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}
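/*
 * The deadspan timer armed above inherits the socket's reference on the
 * call; when it fires, rxrpc_dead_call_expired() moves the call to
 * RXRPC_CALL_DEAD and drops that reference, which lets the destroyer work
 * item run once the usage count reaches zero.
 */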
/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call, rxrpc_call_put);
}
/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched = false;

	rxrpc_see_call(call);
	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = __rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
	}
	write_unlock(&call->state_lock);
	if (sched)
		rxrpc_queue_call(call);
}
/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}
/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	m = atomic_read(&call->skb_count);
	trace_rxrpc_call(call, op, n, m, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(m != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

/*
 * Release a call ref held by a socket buffer.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	n = atomic_dec_return(&call->usage);
	m = atomic_dec_return(&call->skb_count);
	trace_rxrpc_call(call, rxrpc_call_put_skb, n, m, here, skb);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(m != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}
/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_purge_queue(&call->knlrecv_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}
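/*
 * By the time rxrpc_cleanup_call() runs, the call must have been released
 * (RXRPC_CALL_RELEASED set), have no outstanding events and no connection;
 * if the processor work item is still pending, destruction is deferred by
 * requeuing the destroyer.
 */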
/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	rxrpc_purge_queue(&call->knlrecv_queue);
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%x,%p}",
	       call, atomic_read(&call->usage), call->cid, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}
/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
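/*
 * The expiry handlers below run from timer (softirq) context.  They do no
 * real work themselves: each one just records an event on the call and
 * queues it so that rxrpc_process_call() deals with it.
 */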
/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
	rxrpc_queue_call(call);
}
/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}
/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
}