/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
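/* Illustrative arithmetic (not used by the code): with a 14-byte
 * Ethernet header, a 20-byte IPv4 header and an 8-byte UDP header,
 * MAX_SKB_SIZE comes to 14 + 20 + 8 + 1460 = 1502 bytes, i.e. one
 * full MAX_UDP_CHUNK payload behind worst-case headers per pool skb.
 */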
static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (vlan_tx_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_put_tag(skb, skb->vlan_proto,
				     vlan_tx_tag_get(skb));
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
		skb->vlan_tci = 0;
	}

	status = netdev_start_xmit(skb, dev);
	if (status == NETDEV_TX_OK)
		txq_trans_update(txq);

out:
	return status;
}
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		txq = skb_get_tx_queue(dev, skb);

		local_irq_save(flags);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 */
static int poll_one_napi(struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);

	return budget - work;
}
static void poll_napi(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(napi, budget);
			spin_unlock(&napi->poll_lock);
		}
	}
}
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	int budget = 0;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev, budget);

	up(&ni->dev_lock);

	zap_completion_queue();
}
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);
void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
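/* Usage sketch (hypothetical caller; the real users are the dev_open/
 * dev_close paths mentioned in netpoll_poll_dev() above):
 *
 *	netpoll_poll_disable(dev);	(may sleep on ni->dev_lock)
 *	... change device state ...
 *	netpoll_poll_enable(dev);
 *
 * Bracketing the state change this way guarantees no netpoll activity
 * runs while the device state is in flux.
 */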
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;

			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device; maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
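/* Most callers reach this through the netpoll_send_skb() inline wrapper
 * in <linux/netpoll.h>, which (roughly, sketch only) does:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	netpoll_send_skb_on_dev(np, skb, np->dev);
 *	local_irq_restore(flags);
 *
 * satisfying the "call with IRQ disabled" requirement above.
 */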
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
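/* Usage sketch (hypothetical caller, assuming "np" was configured via
 * netpoll_setup()); this is essentially what netconsole does for each
 * console message:
 *
 *	static void example_log(struct netpoll *np, const char *line)
 *	{
 *		netpoll_send_udp(np, line, strlen(line));
 *	}
 */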
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
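/* Example configuration string (netconsole syntax,
 * src-port@src-ip/dev,dst-port@dst-ip/dst-mac); any field before the
 * MAC address may be left empty to keep the value already in "np":
 *
 *	char opt[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";
 *
 *	if (netpoll_parse_options(&np, opt))
 *		return -EINVAL;		(sketch; opt is modified in place)
 */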
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
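/* Usage sketch (hypothetical stacked-device code, e.g. a bonding-like
 * driver propagating netpoll to a lower device; the caller holds the
 * rtnl and has filled in "np"):
 *
 *	err = __netpoll_setup(np, lower_dev);
 *	if (err)
 *		... unwind ...
 *
 * The ndo_netpoll_setup() hook above is how such drivers recurse into
 * their lower devices.
 */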
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;

		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);
		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);
			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}
			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported on %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
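/* End-to-end sketch (hypothetical client; assumes a zeroed "np" whose
 * np->name feeds the np_info()/np_err() macros):
 *
 *	if (netpoll_parse_options(&np, config) || netpoll_setup(&np))
 *		... fail ...
 *	netpoll_send_udp(&np, msg, msg_len);
 *	...
 *	netpoll_cleanup(&np);
 */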
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* Both cleanup paths (netpoll_cleanup() and the async work in
	 * netpoll_async_cleanup()) hold the rtnl, so rtnl_dereference()
	 * is safe here.
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);