/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "name_distr.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

static void tipc_bclink_lock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
}

static void tipc_bclink_unlock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node = NULL;

	if (likely(!tn->bclink->flags)) {
		spin_unlock_bh(&tn->bclink->lock);
		return;
	}

	if (tn->bclink->flags & TIPC_BCLINK_RESET) {
		tn->bclink->flags &= ~TIPC_BCLINK_RESET;
		node = tipc_bclink_retransmit_to(net);
	}
	spin_unlock_bh(&tn->bclink->lock);

	if (node)
		tipc_link_reset_all(node);
}

void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}

uint tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

void tipc_bclink_set_flags(struct net *net, unsigned int flags)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tn->bclink->flags |= flags;
}

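/* The pending-ACK count of an outgoing broadcast buffer is stashed in the
 * skb control block 'handle' field. It starts at the number of destination
 * nodes and is decremented as each node acknowledges the packet; the buffer
 * is unlinked from the transmit queue and freed once the count reaches zero.
 */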
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

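/* bclink_set_last_sent - record the highest broadcast seqno sent so far
 *
 * The broadcast link reuses the link's fsm_msg_cnt field for this purpose:
 * it is taken from the head of the backlog queue (minus one) while packets
 * are still queued there, and from next_out_no - 1 when the backlog is empty.
 */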
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff *skb = skb_peek(&bcl->backlogq);

	if (skb)
		bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->fsm_msg_cnt;
}

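/* bclink_update_last_sent - note the highest seqno a peer node is known to
 * have sent on the broadcast link; the recorded value never moves backwards
 */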
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}

/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->fsm_msg_cnt;
		else
			acked = tn->bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);

	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;

	tipc_node_unlock(n_ptr);
}

/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}

	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node->active_links[node->addr & 1],
				     STATE_MSG, 0, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf))
				goto unlock;
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
exit:
	kfree_skb(buf);
}

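/* tipc_bclink_acks_missing - check if a peer node still has broadcast
 * packets from this node that it has not yet acknowledged
 */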
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}

/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr->primary = bp_temp[pri].secondary;
				bp_curr++;
				bp_curr->primary = bp_temp[pri].primary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

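/* tipc_nl_add_bc_link - append broadcast link state and statistics to a
 * netlink message
 */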
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);
	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);
	return -EMSGSIZE;
}

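/* tipc_bclink_reset_stats - clear the broadcast link's statistics counters */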
int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}

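/* tipc_bclink_set_queue_limits - set the broadcast link's send window */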
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

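/* tipc_bclink_init - allocate and initialize the broadcast link and its
 * pseudo-bearer for this network namespace
 */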
int tipc_bclink_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&bclink->lock);
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
	skb_queue_head_init(&bcl->wakeupq);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->state = WORKING_WORKING;
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
	return 0;
}

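/* tipc_bclink_stop - purge the broadcast link queues and release the
 * broadcast link and pseudo-bearer state
 */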
void tipc_bclink_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bclink);
}

/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}