1 /* Client connection-specific management code.
3 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/slab.h>
15 #include <linux/idr.h>
16 #include <linux/timer.h>
17 #include "ar-internal.h"
20 * We use machine-unique IDs for our client connections.
22 DEFINE_IDR(rxrpc_client_conn_ids);
/* Serialises allocation into / removal from the shared IDR above; taken by
 * both the get and put paths so CID assignment is globally consistent. */
23 static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
26 * Get a connection ID and epoch for a client connection from the global pool.
27 * The connection struct pointer is then recorded in the idr radix tree. The
28 * epoch is changed if this wraps.
30 * TODO: The IDR tree gets very expensive on memory if the connection IDs are
31 * widely scattered throughout the number space, so we shall need to retire
32 * connections that have, say, an ID more than four times the maximum number of
33 * client conns away from the current allocation point to try and keep the IDs
34 * concentrated. We will also need to retire connections from an old epoch.
36 static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
/* NOTE(review): the extraction has elided lines here — the remaining
 * parameter(s), local declarations (id, epoch) and any preemption/epoch
 * handling are not visible.  The visible body below is incomplete. */
45 spin_lock(&rxrpc_conn_id_lock);
49 /* We could use idr_alloc_cyclic() here, but we really need to know
50 * when the thing wraps so that we can advance the epoch.
/* The cursor is advanced manually after each allocation; 0 is never used
 * as a client connection ID, so bump the cursor past it. */
52 if (rxrpc_client_conn_ids.cur == 0)
53 rxrpc_client_conn_ids.cur = 1;
/* First attempt: allocate at or after the cursor so IDs stay roughly
 * monotonic.  0x40000000 bounds the index so it fits in the CID after the
 * RXRPC_CIDSHIFT left-shift below.  GFP_NOWAIT because we hold a spinlock. */
54 id = idr_alloc(&rxrpc_client_conn_ids, conn,
55 rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
/* Retry from 1 — presumably on the first attempt failing because the
 * space above the cursor is exhausted, at which point the epoch would be
 * advanced (per the comment above) — TODO confirm against elided lines. */
59 id = idr_alloc(&rxrpc_client_conn_ids, conn,
60 1, 0x40000000, GFP_NOWAIT);
66 rxrpc_client_conn_ids.cur = id + 1;
68 spin_unlock(&rxrpc_conn_id_lock);
71 conn->proto.epoch = epoch;
/* The IDR index forms the high bits of the CID; the low RXRPC_CIDSHIFT
 * bits select the channel within the connection. */
72 conn->proto.cid = id << RXRPC_CIDSHIFT;
/* Record that this conn owns an IDR slot so that
 * rxrpc_put_client_connection_id() knows to release it. */
73 set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
74 _leave(" [CID %x:%x]", epoch, conn->proto.cid);
/* Error path (elided label above): drop the lock before returning. */
78 spin_unlock(&rxrpc_conn_id_lock);
85 * Release a connection ID for a client connection from the global pool.
87 static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
/* Only connections that successfully obtained an IDR slot (flag set in
 * rxrpc_get_client_connection_id()) have anything to release; this makes
 * the function safe to call on a partially initialised connection. */
89 if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
90 spin_lock(&rxrpc_conn_id_lock);
/* Recover the IDR index by shifting the channel bits back out of the
 * CID — the inverse of the "id << RXRPC_CIDSHIFT" done on allocation. */
91 idr_remove(&rxrpc_client_conn_ids,
92 conn->proto.cid >> RXRPC_CIDSHIFT);
93 spin_unlock(&rxrpc_conn_id_lock);
98 * Destroy the client connection ID tree.
100 void rxrpc_destroy_client_conn_ids(void)
102 struct rxrpc_connection *conn;
/* By the time this runs (module teardown, presumably — caller not visible
 * here) every client connection should already have released its ID; any
 * entry still present is a leak, so report it before destroying the tree. */
105 if (!idr_is_empty(&rxrpc_client_conn_ids)) {
106 idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
107 pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
108 conn, atomic_read(&conn->usage));
/* Frees the IDR's internal nodes; the conns themselves are not freed here. */
113 idr_destroy(&rxrpc_client_conn_ids);
117 * Allocate a client connection. The caller must take care to clear any
118 * padding bytes in *cp.
120 static struct rxrpc_connection *
121 rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
123 struct rxrpc_connection *conn;
128 conn = rxrpc_alloc_connection(gfp);
/* NOTE(review): the NULL-check branch head is elided; these two lines are
 * the allocation-failure path. */
130 _leave(" = -ENOMEM");
131 return ERR_PTR(-ENOMEM);
/* Stamp the connection as client-side: our epoch, client-initiated flag,
 * and the client state.  (Copy of *cp into conn->params is elided.) */
135 conn->proto.epoch = rxrpc_epoch;
137 conn->out_clientflag = RXRPC_CLIENT_INITIATED;
138 conn->state = RXRPC_CONN_CLIENT;
/* Each step below bails to an error label (elided) that unwinds the
 * preceding steps in reverse order. */
140 ret = rxrpc_get_client_connection_id(conn, gfp);
144 ret = rxrpc_init_client_conn_security(conn);
148 ret = conn->security->prime_packet_security(conn);
/* Publish on the global connection list for the reaper/proc code. */
152 write_lock(&rxrpc_connection_lock);
153 list_add_tail(&conn->link, &rxrpc_connections);
154 write_unlock(&rxrpc_connection_lock);
156 /* We steal the caller's peer ref. */
/* ...but must take our own refs on the local endpoint and security key. */
158 rxrpc_get_local(conn->params.local);
159 key_get(conn->params.key);
161 _leave(" = %p", conn);
/* Error unwind (labels elided): undo security, then the IDR slot, then
 * presumably free the conn and return ERR_PTR(ret). */
165 conn->security->clear(conn);
167 rxrpc_put_client_connection_id(conn);
170 _leave(" = %d", ret);
175 * find a connection for a call
176 * - called in process context with IRQs enabled
178 int rxrpc_connect_call(struct rxrpc_call *call,
179 struct rxrpc_conn_parameters *cp,
180 struct sockaddr_rxrpc *srx,
/* NOTE(review): trailing parameters (gfp at least, judging by the uses
 * below) and several local declarations are elided from this extraction. */
183 struct rxrpc_connection *conn, *candidate = NULL;
184 struct rxrpc_local *local = cp->local;
185 struct rb_node *p, **pp, *parent;
189 DECLARE_WAITQUEUE(myself, current);
191 _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
/* Resolve/create the peer first; its ref is later consumed by the new
 * connection ("we steal the caller's peer ref" in the allocator) or
 * dropped explicitly on the reuse path below. */
193 cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
197 if (!cp->exclusive) {
198 /* Search for a existing client connection unless this is going
199 * to be a connection that's used exclusively for a single call.
/* First rbtree search under the endpoint's client_conns lock; keyed by
 * the connection parameters via the cmp() macro below. */
202 spin_lock(&local->client_conns_lock);
203 p = local->client_conns.rb_node;
205 conn = rb_entry(p, struct rxrpc_connection, client_node);
/* Compare one parameter field of the candidate conn against *cp; the
 * combining logic (diff accumulation / left-right descent) is elided. */
207 #define cmp(X) ((long)conn->params.X - (long)cp->X)
210 cmp(security_level));
216 goto found_extant_conn;
218 spin_unlock(&local->client_conns_lock);
221 /* We didn't find a connection or we want an exclusive one. */
222 _debug("get new conn");
223 candidate = rxrpc_alloc_client_connection(cp, gfp);
/* Allocation-failure branch (condition elided). */
225 _leave(" = -ENOMEM");
230 /* Assign the call on an exclusive connection to channel 0 and
231 * don't add the connection to the endpoint's shareable conn
234 _debug("exclusive chan 0");
/* Channel 0 is taken by this call, so RXRPC_MAXCALLS - 1 remain. */
236 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
/* Lock ordering: channel_lock is taken before jumping to the attach
 * code, mirroring the non-exclusive path below. */
237 spin_lock(&conn->channel_lock);
242 /* We need to redo the search before attempting to add a new connection
243 * lest we race with someone else adding a conflicting instance.
246 spin_lock(&local->client_conns_lock);
248 pp = &local->client_conns.rb_node;
252 conn = rb_entry(parent, struct rxrpc_connection, client_node);
256 cmp(security_level));
258 pp = &(*pp)->rb_left;
260 pp = &(*pp)->rb_right;
/* Exact match on the second pass: someone beat us to it; reuse theirs. */
262 goto found_extant_conn;
265 /* The second search also failed; simply add the new connection with
266 * the new call in channel 0. Note that we need to take the channel
267 * lock before dropping the client conn lock.
270 set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
271 rb_link_node(&candidate->client_node, parent, pp);
272 rb_insert_color(&candidate->client_node, &local->client_conns);
277 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
/* Take channel_lock BEFORE releasing client_conns_lock (per the comment
 * above) so the new conn can't be raced on between publish and attach. */
278 spin_lock(&conn->channel_lock);
279 spin_unlock(&local->client_conns_lock);
/* Common attach point: bind the call to channel 'chan' of 'conn'.
 * channel_lock is held here on every path that reaches this code. */
283 _debug("found chan");
285 call->channel = chan;
286 call->epoch = conn->proto.epoch;
/* The call's CID is the connection CID plus the channel in the low bits. */
287 call->cid = conn->proto.cid | chan;
288 call->call_id = ++conn->channels[chan].call_counter;
289 conn->channels[chan].call_id = call->call_id;
290 rcu_assign_pointer(conn->channels[chan].call, call);
292 _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);
294 spin_unlock(&conn->channel_lock);
/* The conn holds its own peer ref; drop the lookup ref taken above. */
295 rxrpc_put_peer(cp->peer);
297 _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
300 /* We found a potentially suitable connection already in existence. If
301 * we can reuse it (ie. its usage count hasn't been reduced to 0 by the
302 * reaper), discard any candidate we may have allocated, and try to get
303 * a channel on this one, otherwise we have to replace it.
306 _debug("found conn");
/* Usage hit 0 (being reaped): swap our candidate into the extant conn's
 * slot in the rbtree rather than reusing the dying connection. */
307 if (!rxrpc_get_connection_maybe(conn)) {
308 set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
309 rb_replace_node(&conn->client_node,
310 &candidate->client_node,
311 &local->client_conns);
312 clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
316 spin_unlock(&local->client_conns_lock);
/* Reusing the extant conn: the unused candidate (if any) is discarded. */
318 rxrpc_put_connection(candidate);
/* Reserve a channel: decrement avail_chans unless it's already 0. */
320 if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
/* No free channel and we may not sleep: give up with -EAGAIN. */
321 if (!gfpflags_allow_blocking(gfp)) {
322 rxrpc_put_connection(conn);
323 _leave(" = -EAGAIN");
/* Otherwise wait (interruptibly) for a channel to be released. */
327 add_wait_queue(&conn->channel_wq, &myself);
329 set_current_state(TASK_INTERRUPTIBLE);
330 if (atomic_add_unless(&conn->avail_chans, -1, 0))
332 if (signal_pending(current))
336 remove_wait_queue(&conn->channel_wq, &myself);
337 __set_current_state(TASK_RUNNING);
340 /* The connection allegedly now has a free channel and we can now
341 * attach the call to it.
343 spin_lock(&conn->channel_lock);
/* Find the first channel with no call attached; one must exist because
 * we successfully decremented avail_chans above. */
345 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
346 if (!conn->channels[chan].call)
/* Signal-interrupted wait (label elided): undo the wait and all refs. */
351 remove_wait_queue(&conn->channel_wq, &myself);
352 __set_current_state(TASK_RUNNING);
353 rxrpc_put_connection(conn);
354 rxrpc_put_peer(cp->peer);
356 _leave(" = -ERESTARTSYS");
361 * Remove a client connection from the local endpoint's tree, thereby removing
362 * it as a target for reuse for new client calls.
364 void rxrpc_unpublish_client_conn(struct rxrpc_connection *conn)
366 struct rxrpc_local *local = conn->params.local;
368 spin_lock(&local->client_conns_lock);
/* test_and_clear makes unpublishing idempotent: only the caller that
 * actually clears the flag erases the node (a replaced conn has already
 * had the flag cleared by rxrpc_connect_call()). */
369 if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags))
370 rb_erase(&conn->client_node, &local->client_conns);
371 spin_unlock(&local->client_conns_lock);
/* Also release the connection's CID slot in the global IDR. */
373 rxrpc_put_client_connection_id(conn);