/* RxRPC packet reception
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/net_namespace.h>
#include "ar-internal.h"

static void rxrpc_proto_abort(const char *why,
			      struct rxrpc_call *call, rxrpc_seq_t seq)
{
	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, EBADMSG)) {
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
}

/*
 * Do TCP-style congestion management [RFC 5681].
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	summary->flight_size =
		(call->tx_top - call->tx_hard_ack) - summary->nr_acks;

	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		if (cwnd > call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}

	cumulative_acks += summary->nr_new_acks;
	cumulative_acks += summary->nr_rot_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	summary->mode = call->cong_mode;
	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;

	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd > call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;

	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_usage == 0)
			goto out;
		if (ktime_before(skb->tstamp,
				 ktime_add_ns(call->cong_tstamp,
					      call->peer->rtt)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;

	case RXRPC_CALL_PACKET_LOSS:
		if (summary->nr_nacks == 0)
			goto resume_normality;

		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;

	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (summary->nr_nacks == 0)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd <= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	cumulative_acks = 0;
out_no_clear_ca:
	if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
		cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	trace_rxrpc_congest(call, summary, sp->hdr.serial, change);
	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
	return;

packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST ||
	    summary->nr_acks != call->tx_top - call->tx_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}
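
/* For reference, the congestion state transitions implemented above (this
 * summarises the code, it is not an additional mechanism):
 *
 *	SLOW_START --(cwnd grows past ssthresh)--> CONGEST_AVOIDANCE
 *	SLOW_START | CONGEST_AVOIDANCE --(NACK seen)--> PACKET_LOSS
 *	PACKET_LOSS --(three duplicate ACKs)--> FAST_RETRANSMIT
 *	PACKET_LOSS | FAST_RETRANSMIT --(NACKs cleared)--> SLOW_START or
 *		CONGEST_AVOIDANCE, depending on whether cwnd is still at or
 *		under ssthresh
 *
 * A retransmission timeout halves ssthresh and drops cwnd back to 1, and
 * cwnd is always clamped to the ring capacity (RXRPC_RXTX_BUFF_SIZE - 1).
 */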

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
			    int skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

	if (call->peer->rtt_usage < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}
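
/* Note the rate limit applied above: a params-probing ping is only proposed
 * if fewer than three RTT samples have been gathered for the peer or if the
 * last RTT probe was over a second ago, so established calls aren't flooded
 * with pings.
 */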

/*
 * Apply a hard ACK by advancing the Tx window.
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct sk_buff *skb, *list = NULL;
	int ix;
	u8 annotation;

	if (call->acks_lowest_nak == call->tx_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (before_eq(call->acks_lowest_nak, to)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	spin_lock(&call->lock);

	while (before(call->tx_hard_ack, to)) {
		call->tx_hard_ack++;
		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		annotation = call->rxtx_annotations[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
		call->rxtx_buffer[ix] = NULL;
		call->rxtx_annotations[ix] = 0;
		skb->next = list;
		list = skb;

		if (annotation & RXRPC_TX_ANNO_LAST)
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
		if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
			summary->nr_rot_new_acks++;
	}

	spin_unlock(&call->lock);

	trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
				    rxrpc_transmit_rotate_last :
				    rxrpc_transmit_rotate));
	wake_up(&call->waitq);

	while (list) {
		skb = list;
		list = skb->next;
		skb->next = NULL;
		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	}
}
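
/* Design note: rotated skbs are unhooked from the ring and chained onto a
 * local list while call->lock is held, but only freed after the lock has
 * been dropped, keeping the locked region as short as possible.
 */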

/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       const char *abort_why)
{
	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	write_lock(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		if (reply_begun)
			call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		else
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		__rxrpc_call_completed(call);
		rxrpc_notify_socket(call);
		break;

	default:
		goto bad_state;
	}

	write_unlock(&call->state_lock);
	if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, 0, false, true,
				  rxrpc_propose_ack_client_tx_end);
		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
	} else {
		trace_rxrpc_transmit(call, rxrpc_transmit_end);
	}
	_leave(" = ok");
	return true;

bad_state:
	write_unlock(&call->state_lock);
	kdebug("end_tx %s", rxrpc_call_states[call->state]);
	rxrpc_proto_abort(abort_why, call, call->tx_top);
	return false;
}
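
/* For reference, the state transitions made above:
 *
 *	client: SEND_REQUEST/AWAIT_REPLY -> RECV_REPLY (reply has begun)
 *	client: SEND_REQUEST/AWAIT_REPLY -> AWAIT_REPLY (final ACK only)
 *	server: AWAIT_ACK -> call completed
 *
 * Any other state at this point indicates a protocol error and the call is
 * aborted.
 */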

/*
 * Begin the reply reception phase of a call.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		spin_lock_bh(&call->lock);
		call->ackr_reason = 0;
		call->resend_at = call->expire_at;
		call->ack_at = call->expire_at;
		spin_unlock_bh(&call->lock);
		rxrpc_set_timer(call, rxrpc_timer_init_for_reply);
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		rxrpc_rotate_tx_window(call, top, &summary);
	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_proto_abort("TXL", call, top);
		return false;
	}
	if (!rxrpc_end_tx_phase(call, true, "ETD"))
		return false;
	call->tx_phase = false;
	return true;
}

/*
 * Scan a jumbo packet to validate its structure and to work out how many
 * subpackets it contains.
 *
 * A jumbo packet is a collection of consecutive packets glued together with
 * little headers between that indicate how to change the initial header for
 * each subpacket.
 *
 * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
 * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
 * size.
 */
static bool rxrpc_validate_jumbo(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sp->offset;
	unsigned int len = skb->len;
	int nr_jumbo = 1;
	u8 flags = sp->hdr.flags;

	do {
		nr_jumbo++;
		if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;
		if (flags & RXRPC_LAST_PACKET)
			goto protocol_error;
		offset += RXRPC_JUMBO_DATALEN;
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			goto protocol_error;
		offset += sizeof(struct rxrpc_jumbo_header);
	} while (flags & RXRPC_JUMBO_PACKET);

	sp->nr_jumbo = nr_jumbo;
	return true;

protocol_error:
	return false;
}
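
/* Illustrative layout of the jumbo packet validated above, assuming the
 * usual Rx protocol sizes (RXRPC_JUMBO_DATALEN is 1412 bytes; given the
 * arithmetic in the loop, RXRPC_JUMBO_SUBPKTLEN is evidently that plus the
 * size of the little jumbo header):
 *
 *	+-------------+--------------+-----------+--------------+-----+
 *	| wire header | 1412 bytes   | jumbo hdr | 1412 bytes   | ... |
 *	|             | JUMBO flag   |           | JUMBO flag?  |     |
 *	+-------------+--------------+-----------+--------------+-----+
 *
 * Only the final subpacket may have a different length, and only it may
 * carry RXRPC_LAST_PACKET.
 */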

/*
 * Handle reception of a duplicate packet.
 *
 * We have to take care to avoid an attack here whereby we're given a series of
 * jumbograms, each with a sequence number one before the preceding one and
 * filled up to maximum UDP size.  If they never send us the first packet in
 * the sequence, they can cause us to have to hold on to around 2MiB of kernel
 * space until the call times out.
 *
 * We limit the space usage by only accepting three duplicate jumbo packets per
 * call.  After that, we tell the other side we're no longer accepting jumbos
 * (that information is encoded in the ACK packet).
 */
static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
				 u8 annotation, bool *_jumbo_bad)
{
	/* Discard normal packets that are duplicates. */
	if (annotation == 0)
		return;

	/* Skip jumbo subpackets that are duplicates.  When we've had three or
	 * more partially duplicate jumbo packets, we refuse to take any more
	 * jumbos for this call.
	 */
	if (!*_jumbo_bad) {
		call->nr_jumbo_bad++;
		*_jumbo_bad = true;
	}
}

/*
 * Process a DATA packet, adding the packet to the Rx ring.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
			     u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sp->offset;
	unsigned int ix;
	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
	rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
	bool immediate_ack = false, jumbo_bad = false, queued;
	u16 len;
	u8 ack = 0, flags, annotation = 0;

	_enter("{%u,%u},{%u,%u}",
	       call->rx_hard_ack, call->rx_top, skb->len, seq);

	_proto("Rx DATA %%%u { #%u f=%02x }",
	       sp->hdr.serial, seq, sp->hdr.flags);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	/* Received data implicitly ACKs all of the request packets we sent
	 * when we're acting as a client.
	 */
	if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
	     call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
	    !rxrpc_receiving_reply(call))
		return;

	call->ackr_prev_seq = seq;

	hard_ack = READ_ONCE(call->rx_hard_ack);
	if (after(seq, hard_ack + call->rx_winsize)) {
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		ack_serial = serial;
		goto ack;
	}

	flags = sp->hdr.flags;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (call->nr_jumbo_bad > 3) {
			ack = RXRPC_ACK_NOSPACE;
			ack_serial = serial;
			goto ack;
		}
		annotation = 1;
	}

next_subpacket:
	queued = false;
	ix = seq & RXRPC_RXTX_BUFF_MASK;
	len = skb->len;
	if (flags & RXRPC_JUMBO_PACKET)
		len = RXRPC_JUMBO_DATALEN;

	if (flags & RXRPC_LAST_PACKET) {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq != call->rx_top)
			return rxrpc_proto_abort("LSN", call, seq);
	} else {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, call->rx_top))
			return rxrpc_proto_abort("LSA", call, seq);
	}

	if (before_eq(seq, hard_ack)) {
		ack = RXRPC_ACK_DUPLICATE;
		ack_serial = serial;
		goto skip;
	}

	if (flags & RXRPC_REQUEST_ACK && !ack) {
		ack = RXRPC_ACK_REQUESTED;
		ack_serial = serial;
	}

	if (call->rxtx_buffer[ix]) {
		rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
		if (ack != RXRPC_ACK_DUPLICATE) {
			ack = RXRPC_ACK_DUPLICATE;
			ack_serial = serial;
		}
		immediate_ack = true;
		goto skip;
	}

	/* Queue the packet.  We use a couple of memory barriers here as need
	 * to make sure that rx_top is perceived to be set after the buffer
	 * pointer and that the buffer pointer is set after the annotation and
	 * the skb data.
	 *
	 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
	 * and also rxrpc_fill_out_ack().
	 */
	rxrpc_get_skb(skb, rxrpc_skb_rx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	if (after(seq, call->rx_top)) {
		smp_store_release(&call->rx_top, seq);
	} else if (before(seq, call->rx_top)) {
		/* Send an immediate ACK if we fill in a hole */
		if (!ack) {
			ack = RXRPC_ACK_DELAY;
			ack_serial = serial;
		}
		immediate_ack = true;
	}
	if (flags & RXRPC_LAST_PACKET) {
		set_bit(RXRPC_CALL_RX_LAST, &call->flags);
		trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
	} else {
		trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
	}
	queued = true;

	if (after_eq(seq, call->rx_expect_next)) {
		if (after(seq, call->rx_expect_next)) {
			_net("OOS %u > %u", seq, call->rx_expect_next);
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			ack_serial = serial;
		}
		call->rx_expect_next = seq + 1;
	}

skip:
	offset += len;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			return rxrpc_proto_abort("XJF", call, seq);
		offset += sizeof(struct rxrpc_jumbo_header);
		seq++;
		serial++;
		annotation++;
		if (flags & RXRPC_JUMBO_PACKET)
			annotation |= RXRPC_RX_ANNO_JLAST;
		if (after(seq, hard_ack + call->rx_winsize)) {
			ack = RXRPC_ACK_EXCEEDS_WINDOW;
			ack_serial = serial;
			if (!jumbo_bad) {
				call->nr_jumbo_bad++;
				jumbo_bad = true;
			}
			goto ack;
		}

		_proto("Rx DATA Jumbo %%%u", serial);
		goto next_subpacket;
	}

	if (queued && flags & RXRPC_LAST_PACKET && !ack) {
		ack = RXRPC_ACK_DELAY;
		ack_serial = serial;
	}

ack:
	if (ack)
		rxrpc_propose_ACK(call, ack, skew, ack_serial,
				  immediate_ack, true,
				  rxrpc_propose_ack_input_data);

	if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1)
		rxrpc_notify_socket(call);
	_leave(" [queued]");
}
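
/* Barrier pairing for the Rx ring insertion above: the annotation is
 * written, then smp_wmb(), then the buffer pointer, and finally rx_top is
 * advanced with smp_store_release().  The consumers named in the comment
 * above (rxrpc_recvmsg_data() and friends) are expected to read rx_top with
 * acquire semantics first, so that seeing the new rx_top guarantees they
 * also see the buffer pointer and its annotation.
 */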

/*
 * Process a requested ACK.
 */
static void rxrpc_input_requested_ack(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	ktime_t sent_at;
	int ix;

	/* Find the packet whose transmission prompted this ACK. */
	for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
		skb = call->rxtx_buffer[ix];
		if (!skb)
			continue;
		sp = rxrpc_skb(skb);
		if (sp->hdr.serial != orig_serial)
			continue;
		smp_rmb();
		sent_at = skb->tstamp;
		goto found;
	}
	return;

found:
	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
			   orig_serial, ack_serial, sent_at, resp_time);
}

/*
 * Process a ping response.
 */
static void rxrpc_input_ping_response(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	rxrpc_serial_t ping_serial;
	ktime_t ping_time;

	ping_time = call->ackr_ping_time;
	smp_rmb();
	ping_serial = call->ackr_ping;

	if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
	    before(orig_serial, ping_serial))
		return;
	clear_bit(RXRPC_CALL_PINGING, &call->flags);
	if (after(orig_serial, ping_serial))
		return;

	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
			   orig_serial, ack_serial, ping_time, resp_time);
}
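
/* Both RTT paths feed rxrpc_peer_add_rtt() with the transmission time of
 * the original packet and the arrival time of the ACK; they differ only in
 * how the original is located - a Tx ring scan for a REQUESTED ACK versus
 * the recorded ping serial for a PING_RESPONSE.
 */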

/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				struct rxrpc_ackinfo *ackinfo)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;
	unsigned int mtu;
	u32 rwind = ntohl(ackinfo->rwind);

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       sp->hdr.serial,
	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
	       rwind, ntohl(ackinfo->jumbo_max));

	if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
		rwind = RXRPC_RXTX_BUFF_SIZE - 1;
	call->tx_winsize = rwind;

	mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));

	peer = call->peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}
}
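
/* Two invariants maintained above: the advertised receive window is clamped
 * to RXRPC_RXTX_BUFF_SIZE - 1 because tx_winsize can never usefully exceed
 * the Tx ring capacity, and the peer's MTU is only ever revised downwards
 * here - an ACK cannot talk us into sending larger packets.
 */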

/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
				  rxrpc_seq_t seq, int nr_acks,
				  struct rxrpc_ack_summary *summary)
{
	int ix;
	u8 annotation, anno_type;

	for (; nr_acks > 0; nr_acks--, seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		switch (*acks++) {
		case RXRPC_ACK_TYPE_ACK:
			summary->nr_acks++;
			if (anno_type == RXRPC_TX_ANNO_ACK)
				continue;
			summary->nr_new_acks++;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_ACK | annotation;
			break;
		case RXRPC_ACK_TYPE_NACK:
			if (!summary->nr_nacks &&
			    call->acks_lowest_nak != seq) {
				call->acks_lowest_nak = seq;
				summary->new_low_nack = true;
			}
			summary->nr_nacks++;
			if (anno_type == RXRPC_TX_ANNO_NAK)
				continue;
			summary->nr_new_nacks++;
			if (anno_type == RXRPC_TX_ANNO_RETRANS)
				continue;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_NAK | annotation;
			break;
		default:
			return rxrpc_proto_abort("SFT", call, 0);
		}
	}
}
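
/* Annotation encoding relied upon above: the low bits (RXRPC_TX_ANNO_MASK)
 * hold the per-packet state - unacked, ACK'd, NAK'd or awaiting retransmit -
 * while the remaining bits are preserved, which is why the state is masked
 * off and then OR'd back in when it changes.
 */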

/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet may be discarded and retransmission
 * requested.  A phase is complete when all packets are hard-ACK'd.
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
			    u16 skew)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	union {
		struct rxrpc_ackpacket ack;
		struct rxrpc_ackinfo info;
		u8 acks[RXRPC_MAXACKS];
	} buf;
	rxrpc_serial_t acked_serial;
	rxrpc_seq_t first_soft_ack, hard_ack;
	int nr_acks, offset;

	_enter("");

	if (skb_copy_bits(skb, sp->offset, &buf.ack, sizeof(buf.ack)) < 0) {
		_debug("extraction failure");
		return rxrpc_proto_abort("XAK", call, 0);
	}
	sp->offset += sizeof(buf.ack);

	acked_serial = ntohl(buf.ack.serial);
	first_soft_ack = ntohl(buf.ack.firstPacket);
	hard_ack = first_soft_ack - 1;
	nr_acks = buf.ack.nAcks;
	summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
			      buf.ack.reason : RXRPC_ACK__INVALID);

	trace_rxrpc_rx_ack(call, first_soft_ack, summary.ack_reason, nr_acks);

	_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
	       sp->hdr.serial,
	       ntohs(buf.ack.maxSkew),
	       first_soft_ack,
	       ntohl(buf.ack.previousPacket),
	       acked_serial,
	       rxrpc_ack_names[summary.ack_reason],
	       buf.ack.nAcks);

	if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);
	if (buf.ack.reason == RXRPC_ACK_REQUESTED)
		rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);

	if (buf.ack.reason == RXRPC_ACK_PING) {
		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ping);
	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ack);
	}

	offset = sp->offset + nr_acks + 3;
	if (skb->len >= offset + sizeof(buf.info)) {
		if (skb_copy_bits(skb, offset, &buf.info, sizeof(buf.info)) < 0)
			return rxrpc_proto_abort("XAI", call, 0);
		rxrpc_input_ackinfo(call, skb, &buf.info);
	}

	if (first_soft_ack == 0)
		return rxrpc_proto_abort("AK0", call, 0);

	/* Ignore ACKs unless we are or have just been transmitting. */
	switch (call->state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
	case RXRPC_CALL_SERVER_SEND_REPLY:
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		break;
	default:
		return;
	}

	/* Discard any out-of-order or duplicate ACKs. */
	if (before_eq(sp->hdr.serial, call->acks_latest)) {
		_debug("discard ACK %d <= %d",
		       sp->hdr.serial, call->acks_latest);
		return;
	}
	call->acks_latest_ts = skb->tstamp;
	call->acks_latest = sp->hdr.serial;

	if (before(hard_ack, call->tx_hard_ack) ||
	    after(hard_ack, call->tx_top))
		return rxrpc_proto_abort("AKW", call, 0);
	if (nr_acks > call->tx_top - hard_ack)
		return rxrpc_proto_abort("AKN", call, 0);

	if (after(hard_ack, call->tx_hard_ack))
		rxrpc_rotate_tx_window(call, hard_ack, &summary);

	if (nr_acks > 0) {
		if (skb_copy_bits(skb, sp->offset, buf.acks, nr_acks) < 0)
			return rxrpc_proto_abort("XSA", call, 0);
		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
				      &summary);
	}

	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_end_tx_phase(call, false, "ETA");
		return;
	}

	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST &&
	    summary.nr_acks == call->tx_top - hard_ack)
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  false, true,
				  rxrpc_propose_ack_ping_for_lost_reply);

	return rxrpc_congestion_management(call, skb, &summary);
}
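
/* ACK packet layout consumed above, as implied by the parsing offsets (the
 * trailer is only read if the packet is long enough to contain it):
 *
 *	struct rxrpc_ackpacket	- reason, firstPacket, nAcks, etc.
 *	u8 acks[nAcks]		- one RXRPC_ACK_TYPE_* byte per packet
 *	3 bytes of padding
 *	struct rxrpc_ackinfo	- rxMTU, maxMTU, rwind, jumbo_max
 *
 * which is why the ackinfo offset is computed as sp->offset + nr_acks + 3.
 */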

/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_proto("Rx ACKALL %%%u", sp->hdr.serial);

	rxrpc_rotate_tx_window(call, call->tx_top, &summary);
	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		rxrpc_end_tx_phase(call, false, "ETL");
}

/*
 * Process an ABORT packet.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code = RX_CALL_DEAD;

	_enter("");

	if (skb->len >= 4 &&
	    skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) >= 0)
		abort_code = ntohl(wtmp);

	_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

	if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
				      abort_code, ECONNABORTED))
		rxrpc_notify_socket(call);
}

/*
 * Process an incoming call packet.
 */
static void rxrpc_input_call_packet(struct rxrpc_call *call,
				    struct sk_buff *skb, u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("%p,%p", call, skb);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		rxrpc_input_data(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_input_ack(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", sp->hdr.serial);

		/* Just ignore BUSY packets from the server; the retry and
		 * lifespan timers will take care of business.  BUSY packets
		 * from the client don't make sense.
		 */
		break;

	case RXRPC_PACKET_TYPE_ABORT:
		rxrpc_input_abort(call, skb);
		break;

	case RXRPC_PACKET_TYPE_ACKALL:
		rxrpc_input_ackall(call, skb);
		break;

	default:
		_proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
		break;
	}

	_leave("");
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}

/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
				       struct sk_buff *skb)
{
	_enter("%p,%p", local, skb);

	skb_queue_tail(&local->event_queue, skb);
	rxrpc_queue_local(local);
}

/*
 * put a packet up for transport-level abort
 */
static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	CHECK_SLAB_OKAY(&local->usage);

	skb_queue_tail(&local->reject_queue, skb);
	rxrpc_queue_local(local);
}

/*
 * Extract the wire header from a packet and translate the byte order.
 */
static noinline
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
		return -EBADMSG;

	memset(sp, 0, sizeof(*sp));
	sp->hdr.epoch = ntohl(whdr.epoch);
	sp->hdr.cid = ntohl(whdr.cid);
	sp->hdr.callNumber = ntohl(whdr.callNumber);
	sp->hdr.seq = ntohl(whdr.seq);
	sp->hdr.serial = ntohl(whdr.serial);
	sp->hdr.flags = whdr.flags;
	sp->hdr.type = whdr.type;
	sp->hdr.userStatus = whdr.userStatus;
	sp->hdr.securityIndex = whdr.securityIndex;
	sp->hdr._rsvd = ntohs(whdr._rsvd);
	sp->hdr.serviceId = ntohs(whdr.serviceId);
	sp->offset = sizeof(whdr);
	return 0;
}
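
/* All multi-byte fields in the wire header are big-endian on the wire;
 * everything except the single-byte flags, type, userStatus and
 * securityIndex fields is byte-swapped here once, and the rest of the input
 * path works from the cached copy in the skb's private data.
 */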

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * The socket is locked by the caller and this prevents the socket from being
 * shut down and the local endpoint from going away, thus sk_user_data will not
 * be cleared until this function returns.
 */
void rxrpc_data_ready(struct sock *udp_sk)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan;
	struct rxrpc_call *call;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_local *local = udp_sk->sk_user_data;
	struct sk_buff *skb;
	unsigned int channel;
	int ret, skew;

	_enter("%p", udp_sk);

	ASSERT(!irqs_disabled());

	skb = skb_recv_datagram(udp_sk, 0, 1, &ret);
	if (!skb) {
		if (ret == -EAGAIN)
			return;
		_debug("UDP socket error %d", ret);
		return;
	}

	rxrpc_new_skb(skb, rxrpc_skb_rx_received);

	_net("recv skb %p", skb);

	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
	if (skb_checksum_complete(skb)) {
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
		_leave(" [CSUM failed]");
		return;
	}

	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);

	/* The socket buffer we have is owned by UDP, with UDP's data all over
	 * it, but we really want our own data there.
	 */
	skb_orphan(skb);
	sp = rxrpc_skb(skb);

	/* dig out the RxRPC connection details */
	if (rxrpc_extract_header(sp, skb) < 0)
		goto bad_message;

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			trace_rxrpc_rx_lose(sp);
			rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
			return;
		}
	}

	trace_rxrpc_rx_packet(sp);

	_net("Rx RxRPC %s ep=%x call=%x:%x",
	     sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
	     sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);

	if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
	    !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_VERSION:
		rxrpc_post_packet_to_local(local, skb);
		goto out;

	case RXRPC_PACKET_TYPE_BUSY:
		if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
			goto discard;
		/* fall through */

	case RXRPC_PACKET_TYPE_DATA:
		if (sp->hdr.callNumber == 0)
			goto bad_message;
		if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
		    !rxrpc_validate_jumbo(skb))
			goto bad_message;
		break;
	}

	rcu_read_lock();

	conn = rxrpc_find_connection_rcu(local, skb);
	if (conn) {
		if (sp->hdr.securityIndex != conn->security_ix)
			goto wrong_security;

		if (sp->hdr.callNumber == 0) {
			/* Connection-level packet */
			_debug("CONN %p {%d}", conn, conn->debug_id);
			rxrpc_post_packet_to_conn(conn, skb);
			goto out_unlock;
		}

		/* Note the serial number skew here */
		skew = (int)sp->hdr.serial - (int)conn->hi_serial;
		if (skew >= 0) {
			if (skew > 0)
				conn->hi_serial = sp->hdr.serial;
		} else {
			skew = -skew;
			skew = min(skew, 65535);
		}

		/* Call-bound packets are routed by connection channel. */
		channel = sp->hdr.cid & RXRPC_CHANNELMASK;
		chan = &conn->channels[channel];

		/* Ignore really old calls */
		if (sp->hdr.callNumber < chan->last_call)
			goto discard_unlock;

		if (sp->hdr.callNumber == chan->last_call) {
			/* For the previous service call, if completed
			 * successfully, we discard all further packets.
			 */
			if (rxrpc_conn_is_service(conn) &&
			    (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
			     sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
				goto discard_unlock;

			/* But otherwise we need to retransmit the final packet
			 * from data cached in the connection record.
			 */
			rxrpc_post_packet_to_conn(conn, skb);
			goto out_unlock;
		}

		call = rcu_dereference(chan->call);
	} else {
		skew = 0;
		call = NULL;
	}

	if (!call || atomic_read(&call->usage) == 0) {
		if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
		    sp->hdr.callNumber == 0 ||
		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
			goto bad_message_unlock;
		if (sp->hdr.seq != 1)
			goto discard_unlock;
		call = rxrpc_new_incoming_call(local, conn, skb);
		if (!call) {
			rcu_read_unlock();
			goto reject_packet;
		}
		rxrpc_send_ping(call, skb, skew);
	}

	rxrpc_input_call_packet(call, skb, skew);
	goto discard_unlock;

discard_unlock:
	rcu_read_unlock();
discard:
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
	trace_rxrpc_rx_done(0, 0);
	return;

out_unlock:
	rcu_read_unlock();
	goto out;

wrong_security:
	rcu_read_unlock();
	trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RXKADINCONSISTENCY, EBADMSG);
	skb->priority = RXKADINCONSISTENCY;
	goto post_abort;

bad_message_unlock:
	rcu_read_unlock();
bad_message:
	trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
	skb->priority = RX_PROTOCOL_ERROR;
post_abort:
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
reject_packet:
	trace_rxrpc_rx_done(skb->mark, skb->priority);
	rxrpc_reject_packet(local, skb);
	_leave(" [badmsg]");
	return;
}
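
/* Routing summary for rxrpc_data_ready(), as implemented above:
 *
 *	VERSION packet			-> local endpoint event queue
 *	callNumber == 0			-> connection event queue
 *	packet for a live call		-> rxrpc_input_call_packet()
 *	first DATA packet of a new,
 *	  client-initiated call		-> rxrpc_new_incoming_call(), ping,
 *					   then input as usual
 *	anything unroutable		-> rejected with an abort or discarded
 */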