/* Copyright 2011, Siemens AG
 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Jon's code is based on 6lowpan implementation for Contiki which is:
 * Copyright (c) 2008, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/ieee802154.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>	/* for IPV6_MIN_MTU used in lowpan_setup() */

#include "reassembly.h"

static LIST_HEAD(lowpan_devices);
static int lowpan_open_count;

/* private device info */
struct lowpan_dev_info {
	struct net_device *real_dev; /* real WPAN device ptr */
	struct mutex dev_list_mtx; /* mutex for list ops */
	u16 fragment_tag; /* tag for outgoing 6lowpan fragment headers */
};

struct lowpan_dev_record {
	struct net_device *ldev;
	struct list_head list;
};

/* don't save pan id, it's intra pan */
struct lowpan_addr {
	u8 mode;
	union {
		/* IPv6 needs big endian here */
		__be64 extended_addr;
		__be16 short_addr;
	} u;
};

struct lowpan_addr_info {
	struct lowpan_addr daddr;
	struct lowpan_addr saddr;
};

static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline struct
lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
{
	WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
	return (struct lowpan_addr_info *)(skb->data -
			sizeof(struct lowpan_addr_info));
}

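/* Called via header_ops->create when IPv6 hands us a packet: stash the
 * link-layer addresses in the skb headroom (see lowpan_skb_priv() above)
 * so that lowpan_header() can build the real 802.15.4 header at xmit time.
 */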
static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
				unsigned short type, const void *_daddr,
				const void *_saddr, unsigned int len)
{
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	struct lowpan_addr_info *info;

	/* TODO:
	 * if this packet isn't ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	if (!saddr)
		saddr = dev->dev_addr;

	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	info = lowpan_skb_priv(skb);

	/* TODO: Currently we only support extended_addr */
	info->daddr.mode = IEEE802154_ADDR_LONG;
	memcpy(&info->daddr.u.extended_addr, daddr,
	       sizeof(info->daddr.u.extended_addr));
	info->saddr.mode = IEEE802154_ADDR_LONG;
	memcpy(&info->saddr.u.extended_addr, saddr,
	       sizeof(info->saddr.u.extended_addr));

	return 0;
}

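/* Deliver a received, already decompressed IPv6 datagram to every lowpan
 * interface stacked on the WPAN device the skb arrived on. Each matching
 * interface gets its own copy; the original skb is consumed.
 */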
static int lowpan_give_skb_to_devices(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct lowpan_dev_record *entry;
	struct sk_buff *skb_cp;
	int stat = NET_RX_SUCCESS;

	skb->protocol = htons(ETH_P_IPV6);
	skb->pkt_type = PACKET_HOST;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &lowpan_devices, list)
		if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
			skb_cp = skb_copy(skb, GFP_ATOMIC);
			if (!skb_cp) {
				kfree_skb(skb);
				rcu_read_unlock();
				return NET_RX_DROP;
			}

			skb_cp->dev = entry->ldev;
			stat = netif_rx(skb_cp);
			if (stat == NET_RX_DROP)
				break;
		}
	rcu_read_unlock();

	consume_skb(skb);

	return stat;
}

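/* Undo IPHC compression in place: pull the two IPHC encoding bytes and let
 * lowpan_header_decompress() rebuild the full IPv6 header, using the
 * link-layer addresses from the 802.15.4 header as decompression context.
 */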
static int
iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
{
	u8 iphc0, iphc1;
	struct ieee802154_addr_sa sa, da;
	void *sap, *dap;

	raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		return -EINVAL;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		return -EINVAL;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		return -EINVAL;

	ieee802154_addr_to_sa(&sa, &hdr->source);
	ieee802154_addr_to_sa(&da, &hdr->dest);

	if (sa.addr_type == IEEE802154_ADDR_SHORT)
		sap = &sa.short_addr;
	else
		sap = &sa.hwaddr;

	if (da.addr_type == IEEE802154_ADDR_SHORT)
		dap = &da.short_addr;
	else
		dap = &da.hwaddr;

	return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
					IEEE802154_ADDR_LEN, dap, da.addr_type,
					IEEE802154_ADDR_LEN, iphc0, iphc1);
}

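/* ndo_set_mac_address: allow changing the hardware address only while the
 * interface is down.
 */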
static int lowpan_set_address(struct net_device *dev, void *p)
{
	struct sockaddr *sa = p;

	if (netif_running(dev))
		return -EBUSY;

	/* TODO: validate addr */
	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	return 0;
}

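/* Allocate one fragment skb sized for the real device's headroom and
 * tailroom needs and prime it with an 802.15.4 MAC header derived from
 * the master frame. Returns an ERR_PTR on allocation or header failure.
 */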
static struct sk_buff*
lowpan_alloc_frag(struct sk_buff *skb, int size,
		  const struct ieee802154_hdr *master_hdr)
{
	struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
	struct sk_buff *frag;
	int rc;

	frag = alloc_skb(real_dev->hard_header_len +
			 real_dev->needed_tailroom + size,
			 GFP_ATOMIC);

	if (likely(frag)) {
		frag->dev = real_dev;
		frag->priority = skb->priority;
		skb_reserve(frag, real_dev->hard_header_len);
		skb_reset_network_header(frag);
		*mac_cb(frag) = *mac_cb(skb);

		rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
				     &master_hdr->source, size);
		if (rc < 0) {
			kfree_skb(frag);
			return ERR_PTR(rc);
		}
	} else {
		frag = ERR_PTR(-ENOMEM);
	}

	return frag;
}

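/* Build and transmit a single fragment: copy the 6lowpan fragment header
 * plus len bytes of payload (starting at offset into the network header)
 * into a freshly allocated frame and queue it on the real device.
 */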
static int
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
		     u8 *frag_hdr, int frag_hdrlen,
		     int offset, int len)
{
	struct sk_buff *frag;

	raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);

	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
	if (IS_ERR(frag))
		return PTR_ERR(frag);

	memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
	memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);

	raw_dump_table(__func__, " fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}

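/* Split a too-large datagram per RFC 4944: a FRAG1 fragment carries the
 * (compressed) header plus the leading payload, then FRAGN fragments carry
 * 8-byte-aligned chunks identified by datagram size, tag and offset.
 * dgram_size and dgram_offset count uncompressed IPv6 bytes, which is why
 * lowpan_uncompress_size() is consulted rather than skb->len.
 */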
static int
lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
		       const struct ieee802154_hdr *wpan_hdr)
{
	u16 dgram_size, dgram_offset;
	__be16 frag_tag;
	u8 frag_hdr[5];
	int frag_cap, frag_len, payload_cap, rc;
	int skb_unprocessed, skb_offset;

	dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
		     skb->mac_len;
	frag_tag = htons(lowpan_dev_info(dev)->fragment_tag);
	lowpan_dev_info(dev)->fragment_tag++;

	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	frag_hdr[1] = dgram_size & 0xff;
	memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));

	payload_cap = ieee802154_max_payload(wpan_hdr);

	frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
			      skb_network_header_len(skb), 8);

	skb_offset = skb_network_header_len(skb);
	skb_unprocessed = skb->len - skb->mac_len - skb_offset;

	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
				  LOWPAN_FRAG1_HEAD_SIZE, 0,
				  frag_len + skb_network_header_len(skb));
	if (rc) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, ntohs(frag_tag));
		goto err;
	}

	frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
	frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
	frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);

	do {
		dgram_offset += frag_len;
		skb_offset += frag_len;
		skb_unprocessed -= frag_len;
		frag_len = min(frag_cap, skb_unprocessed);

		frag_hdr[4] = dgram_offset >> 3;

		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
					  frag_len);
		if (rc) {
			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
				 __func__, ntohs(frag_tag), skb_offset);
			goto err;
		}
	} while (skb_unprocessed > frag_cap);

	consume_skb(skb);
	return NET_XMIT_SUCCESS;

err:
	kfree_skb(skb);
	return rc;
}

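/* Compress the IPv6 header (IPHC) and hand the skb to the real WPAN
 * device's header_ops to prepend the 802.15.4 MAC header, using the
 * addresses saved earlier by lowpan_header_create().
 */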
static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_addr sa, da;
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
	struct lowpan_addr_info info;
	void *daddr, *saddr;

	memcpy(&info, lowpan_skb_priv(skb), sizeof(info));

	/* TODO: Currently we only support extended_addr */
	daddr = &info.daddr.u.extended_addr;
	saddr = &info.saddr.u.extended_addr;

	lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);

	cb->type = IEEE802154_FC_TYPE_DATA;

	/* prepare wpan address data */
	sa.mode = IEEE802154_ADDR_LONG;
	sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
	sa.extended_addr = ieee802154_devaddr_from_raw(saddr);

	/* intra-PAN communications */
	da.pan_id = sa.pan_id;

	/* if the destination address is the broadcast address, use the
	 * corresponding short address
	 */
	if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
		da.mode = IEEE802154_ADDR_SHORT;
		da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
	} else {
		da.mode = IEEE802154_ADDR_LONG;
		da.extended_addr = ieee802154_devaddr_from_raw(daddr);
	}

	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
			       ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
}

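/* ndo_start_xmit: compress the header and either transmit the frame
 * directly, if it fits in a single 802.15.4 payload, or fall back to
 * 6lowpan fragmentation.
 */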
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_hdr wpan_hdr;
	int max_single, ret;

	pr_debug("packet xmit\n");

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	ret = lowpan_header(skb, dev);
	if (ret < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	max_single = ieee802154_max_payload(&wpan_hdr);

	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
		skb->dev = lowpan_dev_info(dev)->real_dev;
		return dev_queue_xmit(skb);
	} else {
		netdev_tx_t rc;

		pr_debug("frame is too big, fragmentation is needed\n");
		rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);

		return rc < 0 ? NET_XMIT_DROP : rc;
	}
}

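/* The MLME accessors below simply proxy to the underlying WPAN device. */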
static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;

	return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
}

static __le16 lowpan_get_pan_id(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;

	return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
}

static __le16 lowpan_get_short_addr(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;

	return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
}

static u8 lowpan_get_dsn(const struct net_device *dev)
{
	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;

	return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
}

static struct header_ops lowpan_header_ops = {
	.create	= lowpan_header_create,
};

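/* Private lockdep classes for the stacked device's TX locks, so that
 * nesting them inside the real device's locks does not read as lock
 * recursion to lockdep.
 */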
static struct lock_class_key lowpan_tx_busylock;
static struct lock_class_key lowpan_netdev_xmit_lock_key;

static void lowpan_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &lowpan_netdev_xmit_lock_key);
}

static int lowpan_dev_init(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &lowpan_tx_busylock;
	return 0;
}

static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_init		= lowpan_dev_init,
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};

static struct ieee802154_mlme_ops lowpan_mlme = {
	.get_pan_id = lowpan_get_pan_id,
	.get_phy = lowpan_get_phy,
	.get_short_addr = lowpan_get_short_addr,
	.get_dsn = lowpan_get_dsn,
};

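/* rtnl_link_ops->setup: initialize the virtual lowpan netdev. The MTU is
 * IPV6_MIN_MTU (1280); frames larger than one 802.15.4 payload are split
 * by the fragmentation path above.
 */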
static void lowpan_setup(struct net_device *dev)
{
	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	dev->mtu		= IPV6_MIN_MTU;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->ml_priv		= &lowpan_mlme;
	dev->destructor		= free_netdev;
}

static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
			return -EINVAL;
	}
	return 0;
}

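/* packet_type handler for ETH_P_IEEE802154 frames coming up from the WPAN
 * device. Dispatch on the first 6lowpan byte: uncompressed IPv6, IPHC
 * compressed datagrams, or FRAG1/FRAGN fragments, which are reassembled
 * via lowpan_frag_rcv() before decompression.
 */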
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct ieee802154_hdr hdr;
	int ret;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto drop;

	if (!netif_running(dev))
		goto drop_skb;

	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop_skb;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop_skb;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		goto drop_skb;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(skb, 1);
		return lowpan_give_skb_to_devices(skb, NULL);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
			ret = iphc_decompress(skb, &hdr);
			if (ret < 0)
				goto drop_skb;

			return lowpan_give_skb_to_devices(skb, NULL);
		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
			if (ret == 1) {
				ret = iphc_decompress(skb, &hdr);
				if (ret < 0)
					goto drop_skb;

				return lowpan_give_skb_to_devices(skb, NULL);
			} else if (ret == -1) {
				return NET_RX_DROP;
			} else {
				return NET_RX_SUCCESS;
			}
		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
			if (ret == 1) {
				ret = iphc_decompress(skb, &hdr);
				if (ret < 0)
					goto drop_skb;

				return lowpan_give_skb_to_devices(skb, NULL);
			} else if (ret == -1) {
				return NET_RX_DROP;
			} else {
				return NET_RX_SUCCESS;
			}
		default:
			break;
		}
	}

drop_skb:
	kfree_skb(skb);
drop:
	return NET_RX_DROP;
}

static struct packet_type lowpan_packet_type = {
	.type = htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};

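/* rtnl_link_ops->newlink: bind a new lowpan interface to the real WPAN
 * device named by IFLA_LINK, taking a reference on it, and start receiving
 * 802.15.4 frames once the first link is registered.
 */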
static int lowpan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct net_device *real_dev;
	struct lowpan_dev_record *entry;
	int ret;

	ASSERT_RTNL();

	pr_debug("adding new link\n");

	if (!tb[IFLA_LINK])
		return -EINVAL;
	/* find and hold real wpan device */
	real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_IEEE802154) {
		dev_put(real_dev);
		return -EINVAL;
	}

	lowpan_dev_info(dev)->real_dev = real_dev;
	mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		dev_put(real_dev);
		lowpan_dev_info(dev)->real_dev = NULL;
		return -ENOMEM;
	}

	entry->ldev = dev;

	/* Set the lowpan hardware address to the wpan hardware address. */
	memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	INIT_LIST_HEAD(&entry->list);
	list_add_tail(&entry->list, &lowpan_devices);
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	ret = register_netdevice(dev);
	if (ret >= 0) {
		if (!lowpan_open_count)
			dev_add_pack(&lowpan_packet_type);
		lowpan_open_count++;
	}

	return ret;
}

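/* rtnl_link_ops->dellink: undo lowpan_newlink(): drop the device record,
 * stop packet reception when the last link goes away, and release the
 * reference on the real WPAN device.
 */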
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry, *tmp;

	ASSERT_RTNL();

	lowpan_open_count--;
	if (!lowpan_open_count)
		dev_remove_pack(&lowpan_packet_type);

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}

static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};

static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}

static inline void lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}

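/* Netdev notifier: when the underlying WPAN device unregisters, tear down
 * every lowpan link stacked on it.
 */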
static int lowpan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(del_list);
	struct lowpan_dev_record *entry, *tmp;

	if (dev->type != ARPHRD_IEEE802154)
		goto out;

	if (event == NETDEV_UNREGISTER) {
		list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
			if (lowpan_dev_info(entry->ldev)->real_dev == dev)
				lowpan_dellink(entry->ldev, &del_list);
		}

		unregister_netdevice_many(&del_list);
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block lowpan_dev_notifier = {
	.notifier_call = lowpan_device_event,
};

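/* Module init registers, in order: fragment reassembly, the rtnl link
 * type, and the netdev notifier, unwinding in reverse order on failure.
 */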
static int __init lowpan_init_module(void)
{
	int err = 0;

	err = lowpan_net_frag_init();
	if (err < 0)
		goto out;

	err = lowpan_netlink_init();
	if (err < 0)
		goto out_frag;

	err = register_netdevice_notifier(&lowpan_dev_notifier);
	if (err < 0)
		goto out_pack;

	return 0;

out_pack:
	lowpan_netlink_fini();
out_frag:
	lowpan_net_frag_exit();
out:
	return err;
}

static void __exit lowpan_cleanup_module(void)
{
	lowpan_netlink_fini();

	lowpan_net_frag_exit();

	unregister_netdevice_notifier(&lowpan_dev_notifier);
}

module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");