917db48d7f599775e160ce952aae1188441b26b1
[cascardo/linux.git] / net / rxrpc / conn_client.c
1 /* Client connection-specific management code.
2  *
3  * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public Licence
8  * as published by the Free Software Foundation; either version
9  * 2 of the Licence, or (at your option) any later version.
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/slab.h>
15 #include <linux/idr.h>
16 #include <linux/timer.h>
17 #include "ar-internal.h"
18
/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);		/* maps conn ID -> rxrpc_connection */
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);	/* serialises allocation/removal in the IDR */
24
/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch is changed if this wraps.
 *
 * Returns 0 on success or a negative errno from idr_alloc().
 *
 * TODO: The IDR tree gets very expensive on memory if the connection IDs are
 * widely scattered throughout the number space, so we shall need to retire
 * connections that have, say, an ID more than four times the maximum number of
 * client conns away from the current allocation point to try and keep the IDs
 * concentrated.  We will also need to retire connections from an old epoch.
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	u32 epoch;
	int id;

	_enter("");

	/* Preload outside the spinlock so the idr_alloc() calls below can use
	 * GFP_NOWAIT without failing for lack of preallocated nodes.
	 */
	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	epoch = rxrpc_epoch;

	/* We could use idr_alloc_cyclic() here, but we really need to know
	 * when the thing wraps so that we can advance the epoch.
	 */
	if (rxrpc_client_conn_ids.cur == 0)
		rxrpc_client_conn_ids.cur = 1;
	id = idr_alloc(&rxrpc_client_conn_ids, conn,
		       rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
	if (id < 0) {
		if (id != -ENOSPC)
			goto error;
		/* -ENOSPC means [cur, 0x40000000) is exhausted: wrap back to
		 * ID 1 and advance the epoch so reused IDs don't collide with
		 * IDs handed out under the previous epoch.
		 */
		id = idr_alloc(&rxrpc_client_conn_ids, conn,
			       1, 0x40000000, GFP_NOWAIT);
		if (id < 0)
			goto error;
		epoch++;
		rxrpc_epoch = epoch;
	}
	rxrpc_client_conn_ids.cur = id + 1;	/* next search begins past this ID */

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	/* Flag so that rxrpc_put_client_connection_id() knows to idr_remove() */
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x:%x]", epoch, conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
83
84 /*
85  * Release a connection ID for a client connection from the global pool.
86  */
87 static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
88 {
89         if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
90                 spin_lock(&rxrpc_conn_id_lock);
91                 idr_remove(&rxrpc_client_conn_ids,
92                            conn->proto.cid >> RXRPC_CIDSHIFT);
93                 spin_unlock(&rxrpc_conn_id_lock);
94         }
95 }
96
97 /*
98  * Destroy the client connection ID tree.
99  */
100 void rxrpc_destroy_client_conn_ids(void)
101 {
102         struct rxrpc_connection *conn;
103         int id;
104
105         if (!idr_is_empty(&rxrpc_client_conn_ids)) {
106                 idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
107                         pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
108                                conn, atomic_read(&conn->usage));
109                 }
110                 BUG();
111         }
112
113         idr_destroy(&rxrpc_client_conn_ids);
114 }
115
/*
 * Allocate a client connection.  The caller must take care to clear any
 * padding bytes in *cp.
 *
 * Returns the new connection or an ERR_PTR() - never NULL.  On success this
 * steals the caller's ref on cp->peer (cp->peer is cleared); on failure the
 * peer ref is left with the caller to release.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	conn->params		= *cp;
	conn->proto.epoch	= rxrpc_epoch;
	conn->proto.cid		= 0;	/* set by rxrpc_get_client_connection_id() */
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;

	/* Assign a CID; this overwrites proto.epoch/cid on success. */
	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	_leave(" = %p", conn);
	return conn;

	/* Unwind in reverse order of acquisition: each label undoes the step
	 * that succeeded before the corresponding failure point.
	 */
error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
173
174 /*
175  * find a connection for a call
176  * - called in process context with IRQs enabled
177  */
178 int rxrpc_connect_call(struct rxrpc_call *call,
179                        struct rxrpc_conn_parameters *cp,
180                        struct sockaddr_rxrpc *srx,
181                        gfp_t gfp)
182 {
183         struct rxrpc_connection *conn, *candidate = NULL;
184         struct rxrpc_local *local = cp->local;
185         struct rb_node *p, **pp, *parent;
186         long diff;
187         int chan;
188
189         DECLARE_WAITQUEUE(myself, current);
190
191         _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
192
193         cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
194         if (!cp->peer)
195                 return -ENOMEM;
196
197         if (!cp->exclusive) {
198                 /* Search for a existing client connection unless this is going
199                  * to be a connection that's used exclusively for a single call.
200                  */
201                 _debug("search 1");
202                 spin_lock(&local->client_conns_lock);
203                 p = local->client_conns.rb_node;
204                 while (p) {
205                         conn = rb_entry(p, struct rxrpc_connection, client_node);
206
207 #define cmp(X) ((long)conn->params.X - (long)cp->X)
208                         diff = (cmp(peer) ?:
209                                 cmp(key) ?:
210                                 cmp(security_level));
211                         if (diff < 0)
212                                 p = p->rb_left;
213                         else if (diff > 0)
214                                 p = p->rb_right;
215                         else
216                                 goto found_extant_conn;
217                 }
218                 spin_unlock(&local->client_conns_lock);
219         }
220
221         /* We didn't find a connection or we want an exclusive one. */
222         _debug("get new conn");
223         candidate = rxrpc_alloc_client_connection(cp, gfp);
224         if (!candidate) {
225                 _leave(" = -ENOMEM");
226                 return -ENOMEM;
227         }
228
229         if (cp->exclusive) {
230                 /* Assign the call on an exclusive connection to channel 0 and
231                  * don't add the connection to the endpoint's shareable conn
232                  * lookup tree.
233                  */
234                 _debug("exclusive chan 0");
235                 conn = candidate;
236                 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
237                 spin_lock(&conn->channel_lock);
238                 chan = 0;
239                 goto found_channel;
240         }
241
242         /* We need to redo the search before attempting to add a new connection
243          * lest we race with someone else adding a conflicting instance.
244          */
245         _debug("search 2");
246         spin_lock(&local->client_conns_lock);
247
248         pp = &local->client_conns.rb_node;
249         parent = NULL;
250         while (*pp) {
251                 parent = *pp;
252                 conn = rb_entry(parent, struct rxrpc_connection, client_node);
253
254                 diff = (cmp(peer) ?:
255                         cmp(key) ?:
256                         cmp(security_level));
257                 if (diff < 0)
258                         pp = &(*pp)->rb_left;
259                 else if (diff > 0)
260                         pp = &(*pp)->rb_right;
261                 else
262                         goto found_extant_conn;
263         }
264
265         /* The second search also failed; simply add the new connection with
266          * the new call in channel 0.  Note that we need to take the channel
267          * lock before dropping the client conn lock.
268          */
269         _debug("new conn");
270         set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
271         rb_link_node(&candidate->client_node, parent, pp);
272         rb_insert_color(&candidate->client_node, &local->client_conns);
273 attached:
274         conn = candidate;
275         candidate = NULL;
276
277         atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
278         spin_lock(&conn->channel_lock);
279         spin_unlock(&local->client_conns_lock);
280         chan = 0;
281
282 found_channel:
283         _debug("found chan");
284         call->conn      = conn;
285         call->channel   = chan;
286         call->epoch     = conn->proto.epoch;
287         call->cid       = conn->proto.cid | chan;
288         call->call_id   = ++conn->channels[chan].call_counter;
289         conn->channels[chan].call_id = call->call_id;
290         rcu_assign_pointer(conn->channels[chan].call, call);
291
292         _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);
293
294         spin_unlock(&conn->channel_lock);
295         rxrpc_put_peer(cp->peer);
296         cp->peer = NULL;
297         _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
298         return 0;
299
300         /* We found a potentially suitable connection already in existence.  If
301          * we can reuse it (ie. its usage count hasn't been reduced to 0 by the
302          * reaper), discard any candidate we may have allocated, and try to get
303          * a channel on this one, otherwise we have to replace it.
304          */
305 found_extant_conn:
306         _debug("found conn");
307         if (!rxrpc_get_connection_maybe(conn)) {
308                 set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
309                 rb_replace_node(&conn->client_node,
310                                 &candidate->client_node,
311                                 &local->client_conns);
312                 clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
313                 goto attached;
314         }
315
316         spin_unlock(&local->client_conns_lock);
317
318         rxrpc_put_connection(candidate);
319
320         if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
321                 if (!gfpflags_allow_blocking(gfp)) {
322                         rxrpc_put_connection(conn);
323                         _leave(" = -EAGAIN");
324                         return -EAGAIN;
325                 }
326
327                 add_wait_queue(&conn->channel_wq, &myself);
328                 for (;;) {
329                         set_current_state(TASK_INTERRUPTIBLE);
330                         if (atomic_add_unless(&conn->avail_chans, -1, 0))
331                                 break;
332                         if (signal_pending(current))
333                                 goto interrupted;
334                         schedule();
335                 }
336                 remove_wait_queue(&conn->channel_wq, &myself);
337                 __set_current_state(TASK_RUNNING);
338         }
339
340         /* The connection allegedly now has a free channel and we can now
341          * attach the call to it.
342          */
343         spin_lock(&conn->channel_lock);
344
345         for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
346                 if (!conn->channels[chan].call)
347                         goto found_channel;
348         BUG();
349
350 interrupted:
351         remove_wait_queue(&conn->channel_wq, &myself);
352         __set_current_state(TASK_RUNNING);
353         rxrpc_put_connection(conn);
354         rxrpc_put_peer(cp->peer);
355         cp->peer = NULL;
356         _leave(" = -ERESTARTSYS");
357         return -ERESTARTSYS;
358 }
359
360 /*
361  * Remove a client connection from the local endpoint's tree, thereby removing
362  * it as a target for reuse for new client calls.
363  */
364 void rxrpc_unpublish_client_conn(struct rxrpc_connection *conn)
365 {
366         struct rxrpc_local *local = conn->params.local;
367
368         spin_lock(&local->client_conns_lock);
369         if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags))
370                 rb_erase(&conn->client_node, &local->client_conns);
371         spin_unlock(&local->client_conns_lock);
372
373         rxrpc_put_client_connection_id(conn);
374 }