rxrpc: checking for IS_ERR() instead of NULL
net/rxrpc/call_accept.c (cascardo/linux.git)
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * respond to an incoming call with a BUSY packet
 */
static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
		      struct rxrpc_wire_header *whdr)
{
	struct msghdr msg;
	struct kvec iov[1];
	size_t len;
	int ret;

	_enter("%d,,", local->debug_id);

	whdr->type	= RXRPC_PACKET_TYPE_BUSY;
	whdr->serial	= htonl(1);

	msg.msg_name	= &srx->transport.sin;
	msg.msg_namelen	= sizeof(srx->transport.sin);
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	iov[0].iov_base	= whdr;
	iov[0].iov_len	= sizeof(*whdr);

	len = iov[0].iov_len;

	_proto("Tx BUSY %%1");

	ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
	if (ret < 0) {
		_leave(" = -EAGAIN [sendmsg failed: %d]", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}

/*
 * accept an incoming call that needs peer, transport and/or connection setting
 * up
 */
static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
				      struct rxrpc_sock *rx,
				      struct sk_buff *skb,
				      struct sockaddr_rxrpc *srx)
{
	struct rxrpc_connection *conn;
	struct rxrpc_transport *trans;
	struct rxrpc_skb_priv *sp, *nsp;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct sk_buff *notification;
	int ret;

	_enter("");

	sp = rxrpc_skb(skb);

	/* get a notification message to send to the server app */
	notification = alloc_skb(0, GFP_NOFS);
	if (!notification) {
		_debug("no memory");
		ret = -ENOMEM;
		goto error_nofree;
	}
	rxrpc_new_skb(notification);
	notification->mark = RXRPC_SKB_MARK_NEW_CALL;

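	/* Find the peer record; rxrpc_lookup_peer() returns NULL on failure
	 * rather than an error pointer, hence the NULL check below (the fix
	 * this commit is named after).
	 */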
	peer = rxrpc_lookup_peer(local, srx, GFP_NOIO);
	if (!peer) {
		_debug("no peer");
		ret = -EBUSY;
		goto error;
	}

	trans = rxrpc_get_transport(local, peer, GFP_NOIO);
	rxrpc_put_peer(peer);
	if (IS_ERR(trans)) {
		_debug("no trans");
		ret = -EBUSY;
		goto error;
	}

	conn = rxrpc_incoming_connection(trans, &sp->hdr);
	rxrpc_put_transport(trans);
	if (IS_ERR(conn)) {
		_debug("no conn");
		ret = PTR_ERR(conn);
		goto error;
	}

	call = rxrpc_incoming_call(rx, conn, &sp->hdr);
	rxrpc_put_connection(conn);
	if (IS_ERR(call)) {
		_debug("no call");
		ret = PTR_ERR(call);
		goto error;
	}

	/* attach the call to the socket */
	read_lock_bh(&local->services_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE)
		goto invalid_service;

	write_lock(&rx->call_lock);
	if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
		rxrpc_get_call(call);

		spin_lock(&call->conn->state_lock);
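		/* If the connection still needs securing, queue the call on
		 * the socket's secure queue and kick the connection to issue
		 * its security challenge; otherwise place the call on the
		 * accept queue and notify the server app directly.
		 */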
		if (sp->hdr.securityIndex > 0 &&
		    call->conn->state == RXRPC_CONN_SERVER_UNSECURED) {
			_debug("await conn sec");
			list_add_tail(&call->accept_link, &rx->secureq);
			call->conn->state = RXRPC_CONN_SERVER_CHALLENGING;
			atomic_inc(&call->conn->usage);
			set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events);
			rxrpc_queue_conn(call->conn);
		} else {
			_debug("conn ready");
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
			list_add_tail(&call->accept_link, &rx->acceptq);
			rxrpc_get_call(call);
			nsp = rxrpc_skb(notification);
			nsp->call = call;

			ASSERTCMP(atomic_read(&call->usage), >=, 3);

			_debug("notify");
			spin_lock(&call->lock);
			ret = rxrpc_queue_rcv_skb(call, notification, true,
						  false);
			spin_unlock(&call->lock);
			notification = NULL;
			BUG_ON(ret < 0);
		}
		spin_unlock(&call->conn->state_lock);

		_debug("queued");
	}
	write_unlock(&rx->call_lock);

	_debug("process");
	rxrpc_fast_process_packet(call, skb);

	_debug("done");
	read_unlock_bh(&local->services_lock);
	rxrpc_free_skb(notification);
	rxrpc_put_call(call);
	_leave(" = 0");
	return 0;

invalid_service:
	_debug("invalid");
	read_unlock_bh(&local->services_lock);

	read_lock_bh(&call->state_lock);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
		rxrpc_get_call(call);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
	ret = -ECONNREFUSED;
error:
	rxrpc_free_skb(notification);
error_nofree:
	_leave(" = %d", ret);
	return ret;
}

/*
 * accept incoming calls that need peer, transport and/or connection setting up
 * - the packets we get are all incoming client DATA packets that have seq == 1
 */
void rxrpc_accept_incoming_calls(struct rxrpc_local *local)
{
	struct rxrpc_skb_priv *sp;
	struct sockaddr_rxrpc srx;
	struct rxrpc_sock *rx;
	struct rxrpc_wire_header whdr;
	struct sk_buff *skb;
	int ret;

	_enter("%d", local->debug_id);

	skb = skb_dequeue(&local->accept_queue);
	if (!skb) {
		_leave("\n");
		return;
	}

	_net("incoming call skb %p", skb);

	sp = rxrpc_skb(skb);

	/* Set up a response packet header in case we need it */
	whdr.epoch	= htonl(sp->hdr.epoch);
	whdr.cid	= htonl(sp->hdr.cid);
	whdr.callNumber	= htonl(sp->hdr.callNumber);
	whdr.seq	= htonl(sp->hdr.seq);
	whdr.serial	= 0;
	whdr.flags	= 0;
	whdr.type	= 0;
	whdr.userStatus	= 0;
	whdr.securityIndex = sp->hdr.securityIndex;
	whdr._rsvd	= 0;
	whdr.serviceId	= htons(sp->hdr.serviceId);

	/* determine the remote address */
	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.transport.family = local->srx.transport.family;
	srx.transport_type = local->srx.transport_type;
	switch (srx.transport.family) {
	case AF_INET:
		srx.transport_len = sizeof(struct sockaddr_in);
		srx.transport.sin.sin_port = udp_hdr(skb)->source;
		srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		break;
	default:
		goto busy;
	}

	/* get the socket providing the service */
	read_lock_bh(&local->services_lock);
	list_for_each_entry(rx, &local->services, listen_link) {
		if (rx->srx.srx_service == sp->hdr.serviceId &&
		    rx->sk.sk_state != RXRPC_CLOSE)
			goto found_service;
	}
	read_unlock_bh(&local->services_lock);
	goto invalid_service;

found_service:
	_debug("found service %hd", rx->srx.srx_service);
	if (sk_acceptq_is_full(&rx->sk))
		goto backlog_full;
	sk_acceptq_added(&rx->sk);
	sock_hold(&rx->sk);
	read_unlock_bh(&local->services_lock);

	ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
	if (ret < 0)
		sk_acceptq_removed(&rx->sk);
	sock_put(&rx->sk);
	switch (ret) {
	case -ECONNRESET: /* old calls are ignored */
	case -ECONNABORTED: /* aborted calls are reaborted or ignored */
	case 0:
		return;
	case -ECONNREFUSED:
		goto invalid_service;
	case -EBUSY:
		goto busy;
	case -EKEYREJECTED:
		goto security_mismatch;
	default:
		BUG();
	}

backlog_full:
	read_unlock_bh(&local->services_lock);
busy:
	rxrpc_busy(local, &srx, &whdr);
	rxrpc_free_skb(skb);
	return;

invalid_service:
	skb->priority = RX_INVALID_OPERATION;
	rxrpc_reject_packet(local, skb);
	return;

	/* can't change connection security type mid-flow */
security_mismatch:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	return;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* check the user ID isn't already in use */
	ret = -EBADSLT;
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto out;
	}
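	/* The search above leaves "parent" and "pp" pointing at the slot
	 * where the accepted call will be linked into the ID tree below.
	 */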

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_LOCALLY_ABORTED:
		ret = -ECONNABORTED;
		goto out_release;
	case RXRPC_CALL_NETWORK_ERROR:
		ret = call->conn->error;
		goto out_release;
	case RXRPC_CALL_DEAD:
		ret = -ETIME;
		goto out_discard;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->user_call_ID = user_call_ID;
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();
	if (test_and_set_bit(RXRPC_CALL_EV_ACCEPTED, &call->events))
		BUG();
	rxrpc_queue_call(call);

	rxrpc_get_call(call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

	/* if the call is already dying or dead, then we leave the socket's ref
	 * on it to be released by rxrpc_dead_call_expired() as induced by
	 * rxrpc_release_call() */
out_release:
	_debug("release %p", call);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
		rxrpc_queue_call(call);
out_discard:
	write_unlock_bh(&call->state_lock);
	_debug("discard %p", call);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_BUSY;
		if (test_and_set_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events))
			rxrpc_queue_call(call);
		ret = 0;
		goto out_release;
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_LOCALLY_ABORTED:
		ret = -ECONNABORTED;
		goto out_release;
	case RXRPC_CALL_NETWORK_ERROR:
		ret = call->conn->error;
		goto out_release;
	case RXRPC_CALL_DEAD:
		ret = -ETIME;
		goto out_discard;
	default:
		BUG();
	}

	/* if the call is already dying or dead, then we leave the socket's ref
	 * on it to be released by rxrpc_dead_call_expired() as induced by
	 * rxrpc_release_call() */
out_release:
	_debug("release %p", call);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
		rxrpc_queue_call(call);
out_discard:
	write_unlock_bh(&call->state_lock);
	_debug("discard %p", call);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
 * @sock: The socket on which the impending call is waiting
 * @user_call_ID: The tag to attach to the call
 *
 * Allow a kernel service to accept an incoming call, assuming the incoming
 * call is still valid.
 */
struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
					    unsigned long user_call_ID)
{
	struct rxrpc_call *call;

	_enter(",%lx", user_call_ID);
	call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID);
	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_accept_call);
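
/* Illustrative usage only (not part of the original source): a kernel
 * service might accept the call it has been notified of roughly as follows,
 * where "srv_sock" and "my_ctx" are hypothetical names chosen by the caller:
 *
 *	struct rxrpc_call *call;
 *
 *	call = rxrpc_kernel_accept_call(srv_sock, (unsigned long)my_ctx);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *
 * On failure an ERR_PTR() is returned (see rxrpc_accept_call() above), so
 * the result must be tested with IS_ERR() rather than against NULL.
 */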

/**
 * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
 * @sock: The socket on which the impending call is waiting
 *
 * Allow a kernel service to reject an incoming call with a BUSY message,
 * assuming the incoming call is still valid.
 */
int rxrpc_kernel_reject_call(struct socket *sock)
{
	int ret;

	_enter("");
	ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_reject_call);
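
/* Illustrative usage only (not part of the original source): a service that
 * cannot take the call at the head of its accept queue might reject it with
 * a BUSY indication, where "srv_sock" is a hypothetical socket pointer:
 *
 *	ret = rxrpc_kernel_reject_call(srv_sock);
 *	if (ret < 0)
 *		pr_warn("rxrpc reject failed: %d\n", ret);
 *
 * A return of 0 means the call was marked busy and queued for rejection;
 * -ENODATA means there was no call waiting to be rejected.
 */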