6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
25 #include "xfrm_hash.h"
/* Netlink socket used for xfrm (km) notifications; defined earlier in file. */
28 EXPORT_SYMBOL(xfrm_nl);
/* Async-event timeout (sysctl-tunable), in 100ms units by default. */
30 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
/* Async-event replay-sequence threshold (sysctl-tunable). */
33 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
34 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
/* Lifetime, in seconds, of ACQUIRE (larval) states before they expire. */
36 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
38 /* Each xfrm_state may be linked to two tables:
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
/* Protects all three hash tables, xfrm_state_all, and the counters below. */
45 static DEFINE_SPINLOCK(xfrm_state_lock);
47 /* Hash table to find appropriate SA towards given target (endpoint
48 * of tunnel or destination of transport mode) allowed by selector.
50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
/* Flat list of every SA, used by the dump walker (xfrm_state_walk). */
53 static LIST_HEAD(xfrm_state_all);
/* The three dynamically-resized hash tables and their shared mask. */
54 static struct hlist_head *xfrm_state_bydst __read_mostly;
55 static struct hlist_head *xfrm_state_bysrc __read_mostly;
56 static struct hlist_head *xfrm_state_byspi __read_mostly;
57 static unsigned int xfrm_state_hmask __read_mostly;
58 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
/* Total SA count; drives hash-grow heuristics and SAD info reporting. */
59 static unsigned int xfrm_state_num;
/* Generation counter bumped when a new SA shadows existing bundles. */
60 static unsigned int xfrm_state_genid;
62 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
63 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
65 #ifdef CONFIG_AUDITSYSCALL
66 static void xfrm_audit_state_replay(struct xfrm_state *x,
67 struct sk_buff *skb, __be32 net_seq);
/* NOTE(review): the '#else' between the prototype above and this no-op
 * stub was lost in extraction; the stub applies when auditing is off. */
69 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
70 #endif /* CONFIG_AUDITSYSCALL */
/* Hash (daddr, saddr, reqid, family) into the bydst table.
 * NOTE(review): extraction dropped the 'unsigned int reqid' parameter
 * line and the braces; code text preserved byte-for-byte as found. */
72 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
73 xfrm_address_t *saddr,
75 unsigned short family)
77 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
/* Hash (daddr, saddr, family) into the bysrc table. */
80 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
81 xfrm_address_t *saddr,
82 unsigned short family)
84 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
/* Hash (daddr, spi, proto, family) into the byspi table. */
87 static inline unsigned int
88 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
90 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
/* Re-insert every SA on one old bydst chain into all three new tables,
 * rehashing with the new mask.  Called chain-by-chain during resize.
 * NOTE(review): the local 'unsigned int h' declaration and the byspi
 * 'if (x->id.spi)' guard lines were dropped by extraction. */
93 static void xfrm_hash_transfer(struct hlist_head *list,
94 struct hlist_head *ndsttable,
95 struct hlist_head *nsrctable,
96 struct hlist_head *nspitable,
97 unsigned int nhashmask)
99 struct hlist_node *entry, *tmp;
100 struct xfrm_state *x;
102 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
105 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
106 x->props.reqid, x->props.family,
108 hlist_add_head(&x->bydst, ndsttable+h);
110 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
113 hlist_add_head(&x->bysrc, nsrctable+h);
116 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
117 x->id.proto, x->props.family,
119 hlist_add_head(&x->byspi, nspitable+h);
/* Next table size in bytes: double the current bucket count. */
124 static unsigned long xfrm_hash_new_size(void)
126 return ((xfrm_state_hmask + 1) << 1) *
127 sizeof(struct hlist_head);
/* Serializes concurrent resize work items. */
130 static DEFINE_MUTEX(hash_resize_mutex);
/* Workqueue handler: grow all three state hash tables to double size.
 * Allocates the new tables outside the lock, transfers entries under
 * xfrm_state_lock, then frees the old tables.  On any allocation
 * failure the earlier allocations are freed and the resize is skipped
 * (error-path 'goto' lines were dropped by extraction). */
132 static void xfrm_hash_resize(struct work_struct *__unused)
134 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
135 unsigned long nsize, osize;
136 unsigned int nhashmask, ohashmask;
139 mutex_lock(&hash_resize_mutex);
141 nsize = xfrm_hash_new_size();
142 ndst = xfrm_hash_alloc(nsize);
145 nsrc = xfrm_hash_alloc(nsize);
147 xfrm_hash_free(ndst, nsize);
150 nspi = xfrm_hash_alloc(nsize);
152 xfrm_hash_free(ndst, nsize);
153 xfrm_hash_free(nsrc, nsize);
157 spin_lock_bh(&xfrm_state_lock);
159 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
/* NOTE(review): loop variable 'i' is declared on a dropped line; it must
 * be signed for the '>= 0' termination below to work. */
160 for (i = xfrm_state_hmask; i >= 0; i--)
161 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
164 odst = xfrm_state_bydst;
165 osrc = xfrm_state_bysrc;
166 ospi = xfrm_state_byspi;
167 ohashmask = xfrm_state_hmask;
169 xfrm_state_bydst = ndst;
170 xfrm_state_bysrc = nsrc;
171 xfrm_state_byspi = nspi;
172 xfrm_state_hmask = nhashmask;
174 spin_unlock_bh(&xfrm_state_lock);
176 osize = (ohashmask + 1) * sizeof(struct hlist_head);
177 xfrm_hash_free(odst, osize);
178 xfrm_hash_free(osrc, osize);
179 xfrm_hash_free(ospi, osize);
182 mutex_unlock(&hash_resize_mutex);
/* Deferred-work handle scheduled by xfrm_hash_grow_check(). */
185 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
/* Key managers sleep here waiting for SA table changes. */
187 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
188 EXPORT_SYMBOL(km_waitq);
/* Protects the per-family afinfo registration table below. */
190 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
191 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
/* Garbage-collection: dead states are queued on gc_list and destroyed
 * from process context by xfrm_state_gc_work. */
193 static struct work_struct xfrm_state_gc_work;
194 static HLIST_HEAD(xfrm_state_gc_list);
195 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
197 int __xfrm_state_delete(struct xfrm_state *x);
199 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
200 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
/* Look up the per-family afinfo and return it with xfrm_state_afinfo_lock
 * held for writing; returns NULL (lock released) if the family is out of
 * range or unregistered.  Pair with xfrm_state_unlock_afinfo(). */
202 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
204 struct xfrm_state_afinfo *afinfo;
205 if (unlikely(family >= NPROTO))
207 write_lock_bh(&xfrm_state_afinfo_lock);
208 afinfo = xfrm_state_afinfo[family];
209 if (unlikely(!afinfo))
210 write_unlock_bh(&xfrm_state_afinfo_lock);
/* Release the write lock taken by xfrm_state_lock_afinfo(). */
214 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
215 __releases(xfrm_state_afinfo_lock)
217 write_unlock_bh(&xfrm_state_afinfo_lock);
/* Register an xfrm transform type (e.g. AH/ESP) for @family.
 * Returns -EAFNOSUPPORT if the family has no afinfo; -EEXIST if the
 * proto slot is taken (that branch's line was dropped by extraction). */
220 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
222 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
223 const struct xfrm_type **typemap;
226 if (unlikely(afinfo == NULL))
227 return -EAFNOSUPPORT;
228 typemap = afinfo->type_map;
230 if (likely(typemap[type->proto] == NULL))
231 typemap[type->proto] = type;
234 xfrm_state_unlock_afinfo(afinfo);
237 EXPORT_SYMBOL(xfrm_register_type);
/* Remove a previously registered transform type.  Fails with -ENOENT
 * (dropped line) if the slot holds a different type. */
239 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
241 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
242 const struct xfrm_type **typemap;
245 if (unlikely(afinfo == NULL))
246 return -EAFNOSUPPORT;
247 typemap = afinfo->type_map;
249 if (unlikely(typemap[type->proto] != type))
252 typemap[type->proto] = NULL;
253 xfrm_state_unlock_afinfo(afinfo);
256 EXPORT_SYMBOL(xfrm_unregister_type);
/* Resolve a transform type by (proto, family), taking a module reference.
 * On first miss it drops afinfo, request_module()s "xfrm-type-F-P" and
 * retries once (the 'goto retry' line was dropped by extraction). */
258 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
260 struct xfrm_state_afinfo *afinfo;
261 const struct xfrm_type **typemap;
262 const struct xfrm_type *type;
263 int modload_attempted = 0;
266 afinfo = xfrm_state_get_afinfo(family);
267 if (unlikely(afinfo == NULL))
269 typemap = afinfo->type_map;
271 type = typemap[proto];
/* If the owning module is unloading, treat the type as absent. */
272 if (unlikely(type && !try_module_get(type->owner)))
274 if (!type && !modload_attempted) {
275 xfrm_state_put_afinfo(afinfo);
276 request_module("xfrm-type-%d-%d", family, proto);
277 modload_attempted = 1;
281 xfrm_state_put_afinfo(afinfo);
/* Drop the module reference taken by xfrm_get_type(). */
285 static void xfrm_put_type(const struct xfrm_type *type)
287 module_put(type->owner);
/* Register an encapsulation mode (transport/tunnel/BEET...) for @family.
 * Holds a reference on the afinfo owner module while registered.
 * Error-return lines (-EINVAL for bad encap, -EEXIST for busy slot,
 * -EAGAIN for module_get failure) were dropped by extraction. */
290 int xfrm_register_mode(struct xfrm_mode *mode, int family)
292 struct xfrm_state_afinfo *afinfo;
293 struct xfrm_mode **modemap;
296 if (unlikely(mode->encap >= XFRM_MODE_MAX))
299 afinfo = xfrm_state_lock_afinfo(family);
300 if (unlikely(afinfo == NULL))
301 return -EAFNOSUPPORT;
304 modemap = afinfo->mode_map;
305 if (modemap[mode->encap])
309 if (!try_module_get(afinfo->owner))
312 mode->afinfo = afinfo;
313 modemap[mode->encap] = mode;
317 xfrm_state_unlock_afinfo(afinfo);
320 EXPORT_SYMBOL(xfrm_register_mode);
/* Unregister a mode and release the afinfo owner-module reference
 * taken at registration time. */
322 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
324 struct xfrm_state_afinfo *afinfo;
325 struct xfrm_mode *modemap;
328 if (unlikely(mode->encap >= XFRM_MODE_MAX))
331 afinfo = xfrm_state_lock_afinfo(family);
332 if (unlikely(afinfo == NULL))
333 return -EAFNOSUPPORT;
336 modemap = afinfo->mode_map;
337 if (likely(modemap[mode->encap] == mode)) {
338 modemap[mode->encap] = NULL;
339 module_put(mode->afinfo->owner);
343 xfrm_state_unlock_afinfo(afinfo);
346 EXPORT_SYMBOL(xfrm_unregister_mode);
/* Resolve a mode by (encap, family) with module reference, auto-loading
 * "xfrm-mode-F-E" once on a miss — mirrors xfrm_get_type() above. */
348 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
350 struct xfrm_state_afinfo *afinfo;
351 struct xfrm_mode *mode;
352 int modload_attempted = 0;
354 if (unlikely(encap >= XFRM_MODE_MAX))
358 afinfo = xfrm_state_get_afinfo(family);
359 if (unlikely(afinfo == NULL))
362 mode = afinfo->mode_map[encap];
363 if (unlikely(mode && !try_module_get(mode->owner)))
365 if (!mode && !modload_attempted) {
366 xfrm_state_put_afinfo(afinfo);
367 request_module("xfrm-mode-%d-%d", family, encap);
368 modload_attempted = 1;
372 xfrm_state_put_afinfo(afinfo);
/* Drop the module reference taken by xfrm_get_mode(). */
376 static void xfrm_put_mode(struct xfrm_mode *mode)
378 module_put(mode->owner);
/* Final teardown of one dead SA from the GC work item: stop timers,
 * release modes/type/security state.  Several kfree() lines for algs,
 * encap and coaddr were dropped by extraction. */
381 static void xfrm_state_gc_destroy(struct xfrm_state *x)
383 del_timer_sync(&x->timer);
384 del_timer_sync(&x->rtimer);
391 xfrm_put_mode(x->inner_mode);
392 if (x->inner_mode_iaf)
393 xfrm_put_mode(x->inner_mode_iaf);
395 xfrm_put_mode(x->outer_mode);
397 x->type->destructor(x);
398 xfrm_put_type(x->type);
400 security_xfrm_state_free(x);
/* Workqueue handler: splice the pending GC list under its lock, then
 * destroy each queued SA outside the lock. */
404 static void xfrm_state_gc_task(struct work_struct *data)
406 struct xfrm_state *x;
407 struct hlist_node *entry, *tmp;
408 struct hlist_head gc_list;
410 spin_lock_bh(&xfrm_state_gc_lock);
411 hlist_move_list(&xfrm_state_gc_list, &gc_list);
412 spin_unlock_bh(&xfrm_state_gc_lock);
414 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist)
415 xfrm_state_gc_destroy(x);
/* Convert seconds to jiffies, clamped so mod_timer() never overflows
 * past MAX_SCHEDULE_TIMEOUT (the 'else return secs*HZ' line dropped). */
420 static inline unsigned long make_jiffies(long secs)
422 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
423 return MAX_SCHEDULE_TIMEOUT-1;
/* Per-SA lifetime timer.  Computes the nearest soft/hard add/use expiry,
 * fires soft expiry via km_state_expired(x, 0, ...), re-arms for the next
 * deadline, and on hard expiry deletes the state (auditing the result).
 * Runs under x->lock (the spin_lock() and goto/label lines were dropped
 * by extraction; only the unlock at the end survives). */
428 static void xfrm_timer_handler(unsigned long data)
430 struct xfrm_state *x = (struct xfrm_state*)data;
431 unsigned long now = get_seconds();
432 long next = LONG_MAX;
437 if (x->km.state == XFRM_STATE_DEAD)
439 if (x->km.state == XFRM_STATE_EXPIRED)
441 if (x->lft.hard_add_expires_seconds) {
442 long tmo = x->lft.hard_add_expires_seconds +
443 x->curlft.add_time - now;
449 if (x->lft.hard_use_expires_seconds) {
/* 'use_time ?: now' — a never-used SA counts from now. */
450 long tmo = x->lft.hard_use_expires_seconds +
451 (x->curlft.use_time ? : now) - now;
459 if (x->lft.soft_add_expires_seconds) {
460 long tmo = x->lft.soft_add_expires_seconds +
461 x->curlft.add_time - now;
467 if (x->lft.soft_use_expires_seconds) {
468 long tmo = x->lft.soft_use_expires_seconds +
469 (x->curlft.use_time ? : now) - now;
478 km_state_expired(x, 0, 0);
480 if (next != LONG_MAX)
481 mod_timer(&x->timer, jiffies + make_jiffies(next));
/* Larval (ACQ, no SPI) states just expire; real SAs are deleted. */
486 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
487 x->km.state = XFRM_STATE_EXPIRED;
493 err = __xfrm_state_delete(x);
494 if (!err && x->id.spi)
495 km_state_expired(x, 1, 0);
497 xfrm_audit_state_delete(x, err ? 0 : 1,
498 audit_get_loginuid(current),
499 audit_get_sessionid(current), 0);
502 spin_unlock(&x->lock);
505 static void xfrm_replay_timer_handler(unsigned long data);
/* Allocate and minimally initialise an SA: refcount 1, unhashed list
 * nodes, both timers set up, infinite byte/packet limits, add_time now.
 * Returns NULL on allocation failure (dropped 'if (x)' guard line). */
507 struct xfrm_state *xfrm_state_alloc(void)
509 struct xfrm_state *x;
511 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
514 atomic_set(&x->refcnt, 1);
515 atomic_set(&x->tunnel_users, 0);
516 INIT_LIST_HEAD(&x->km.all);
517 INIT_HLIST_NODE(&x->bydst);
518 INIT_HLIST_NODE(&x->bysrc);
519 INIT_HLIST_NODE(&x->byspi);
520 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
521 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
523 x->curlft.add_time = get_seconds();
/* XFRM_INF = no byte/packet lifetime limit by default. */
524 x->lft.soft_byte_limit = XFRM_INF;
525 x->lft.soft_packet_limit = XFRM_INF;
526 x->lft.hard_byte_limit = XFRM_INF;
527 x->lft.hard_packet_limit = XFRM_INF;
528 x->replay_maxage = 0;
529 x->replay_maxdiff = 0;
530 x->inner_mode = NULL;
531 x->inner_mode_iaf = NULL;
532 spin_lock_init(&x->lock);
536 EXPORT_SYMBOL(xfrm_state_alloc);
/* Last-reference destructor: queue the (already DEAD) SA for the GC
 * work item rather than freeing inline — teardown needs sleepable
 * context (del_timer_sync etc. in xfrm_state_gc_destroy()). */
538 void __xfrm_state_destroy(struct xfrm_state *x)
540 WARN_ON(x->km.state != XFRM_STATE_DEAD);
542 spin_lock_bh(&xfrm_state_gc_lock);
543 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
544 spin_unlock_bh(&xfrm_state_gc_lock);
545 schedule_work(&xfrm_state_gc_work);
547 EXPORT_SYMBOL(__xfrm_state_destroy);
/* Unlink @x from the walker list and all hash tables, mark it DEAD and
 * drop the allocation reference.  Caller holds x->lock.
 * NOTE(review): the 'byspi only if x->id.spi' guard and the return-value
 * bookkeeping lines were dropped by extraction. */
549 int __xfrm_state_delete(struct xfrm_state *x)
553 if (x->km.state != XFRM_STATE_DEAD) {
554 x->km.state = XFRM_STATE_DEAD;
555 spin_lock(&xfrm_state_lock);
556 list_del(&x->km.all);
557 hlist_del(&x->bydst);
558 hlist_del(&x->bysrc);
560 hlist_del(&x->byspi);
562 spin_unlock(&xfrm_state_lock);
564 /* All xfrm_state objects are created by xfrm_state_alloc.
565 * The xfrm_state_alloc call gives a reference, and that
566 * is what we are dropping here.
574 EXPORT_SYMBOL(__xfrm_state_delete);
/* Locked wrapper around __xfrm_state_delete(). */
576 int xfrm_state_delete(struct xfrm_state *x)
580 spin_lock_bh(&x->lock);
581 err = __xfrm_state_delete(x);
582 spin_unlock_bh(&x->lock);
586 EXPORT_SYMBOL(xfrm_state_delete);
588 #ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight for xfrm_state_flush(): verify the LSM permits deleting
 * every SA matching @proto before any are actually removed; audits and
 * aborts on the first denial.  Caller holds xfrm_state_lock. */
590 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
594 for (i = 0; i <= xfrm_state_hmask; i++) {
595 struct hlist_node *entry;
596 struct xfrm_state *x;
598 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
599 if (xfrm_id_proto_match(x->id.proto, proto) &&
600 (err = security_xfrm_state_delete(x)) != 0) {
601 xfrm_audit_state_delete(x, 0,
602 audit_info->loginuid,
603 audit_info->sessionid,
/* Stub used when CONFIG_SECURITY_NETWORK_XFRM is off (always permits;
 * the '#else' and 'return 0' lines were dropped by extraction). */
614 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
/* Delete every non-kernel-held SA matching @proto.  Drops and re-takes
 * xfrm_state_lock around each xfrm_state_delete() (which takes x->lock),
 * restarting the bucket scan afterwards — the 'goto restart' and refcount
 * hold/put lines were dropped by extraction. */
620 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
624 spin_lock_bh(&xfrm_state_lock);
625 err = xfrm_state_flush_secctx_check(proto, audit_info);
629 for (i = 0; i <= xfrm_state_hmask; i++) {
630 struct hlist_node *entry;
631 struct xfrm_state *x;
633 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
634 if (!xfrm_state_kern(x) &&
635 xfrm_id_proto_match(x->id.proto, proto)) {
637 spin_unlock_bh(&xfrm_state_lock);
639 err = xfrm_state_delete(x);
640 xfrm_audit_state_delete(x, err ? 0 : 1,
641 audit_info->loginuid,
642 audit_info->sessionid,
646 spin_lock_bh(&xfrm_state_lock);
654 spin_unlock_bh(&xfrm_state_lock);
658 EXPORT_SYMBOL(xfrm_state_flush);
/* Snapshot SAD statistics (count, current hash size, hash max) under
 * xfrm_state_lock for procfs/netlink reporting. */
660 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
662 spin_lock_bh(&xfrm_state_lock);
663 si->sadcnt = xfrm_state_num;
664 si->sadhcnt = xfrm_state_hmask;
665 si->sadhmcnt = xfrm_state_hashmax;
666 spin_unlock_bh(&xfrm_state_lock);
668 EXPORT_SYMBOL(xfrm_sad_getinfo);
/* Build a temporary selector on an ACQUIRE state via the per-family
 * afinfo hook; selector matches only the triggering flow. */
671 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
672 struct xfrm_tmpl *tmpl,
673 xfrm_address_t *daddr, xfrm_address_t *saddr,
674 unsigned short family)
676 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
679 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
680 xfrm_state_put_afinfo(afinfo);
/* SPI-based SA lookup in the byspi table; per-family address compare.
 * Takes a reference on a hit (xfrm_state_hold and the family switch
 * scaffolding lines were dropped by extraction).  Caller holds
 * xfrm_state_lock. */
684 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
686 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
687 struct xfrm_state *x;
688 struct hlist_node *entry;
690 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
691 if (x->props.family != family ||
693 x->id.proto != proto)
698 if (x->id.daddr.a4 != daddr->a4)
702 if (!ipv6_addr_equal((struct in6_addr *)daddr,
/* Address-pair SA lookup in the bysrc table (used when no SPI is known,
 * e.g. IPcomp); same referencing/locking contract as __xfrm_state_lookup. */
716 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
718 unsigned int h = xfrm_src_hash(daddr, saddr, family);
719 struct xfrm_state *x;
720 struct hlist_node *entry;
722 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
723 if (x->props.family != family ||
724 x->id.proto != proto)
729 if (x->id.daddr.a4 != daddr->a4 ||
730 x->props.saddr.a4 != saddr->a4)
734 if (!ipv6_addr_equal((struct in6_addr *)daddr,
737 !ipv6_addr_equal((struct in6_addr *)saddr,
/* Find an SA identical in identity to @x: by SPI when the proto uses
 * one (@use_spi), otherwise by address pair.  Used to detect duplicates
 * on add/update. */
751 static inline struct xfrm_state *
752 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
755 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
756 x->id.proto, family);
758 return __xfrm_state_lookup_byaddr(&x->id.daddr,
760 x->id.proto, family);
/* Schedule a table resize when an insert collided, we are below the
 * size cap, and load factor exceeds 1 (num > mask). */
763 static void xfrm_hash_grow_check(int have_hash_collision)
765 if (have_hash_collision &&
766 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
767 xfrm_state_num > xfrm_state_hmask)
768 schedule_work(&xfrm_hash_work);
/* Main output-path SA resolution: find the best VALID SA matching the
 * policy template/flow, or create a larval ACQUIRE state and ask the
 * key manager (km_query) to negotiate a real one.  On the larval path
 * the new state is hashed into all three tables and armed with the
 * acq-expire timer.  Returns with a reference held; *err carries
 * -EAGAIN while an acquire is already in flight.
 * NOTE(review): many control-flow lines (best-tracking, gotos, error
 * exits) were dropped by extraction; code preserved as found. */
772 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
773 struct flowi *fl, struct xfrm_tmpl *tmpl,
774 struct xfrm_policy *pol, int *err,
775 unsigned short family)
778 struct hlist_node *entry;
779 struct xfrm_state *x, *x0, *to_put;
780 int acquire_in_progress = 0;
782 struct xfrm_state *best = NULL;
786 spin_lock_bh(&xfrm_state_lock);
787 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
788 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
789 if (x->props.family == family &&
790 x->props.reqid == tmpl->reqid &&
791 !(x->props.flags & XFRM_STATE_WILDRECV) &&
792 xfrm_state_addr_check(x, daddr, saddr, family) &&
793 tmpl->mode == x->props.mode &&
794 tmpl->id.proto == x->id.proto &&
795 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
797 1. There is a valid state with matching selector.
799 2. Valid state with inappropriate selector. Skip.
801 Entering area of "sysdeps".
803 3. If state is not valid, selector is temporary,
804 it selects only session which triggered
805 previous resolution. Key manager will do
806 something to install a state with proper
809 if (x->km.state == XFRM_STATE_VALID) {
810 if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
811 !security_xfrm_state_pol_flow_match(x, pol, fl))
/* Prefer the least-dying, most recently added candidate. */
814 best->km.dying > x->km.dying ||
815 (best->km.dying == x->km.dying &&
816 best->curlft.add_time < x->curlft.add_time))
818 } else if (x->km.state == XFRM_STATE_ACQ) {
819 acquire_in_progress = 1;
820 } else if (x->km.state == XFRM_STATE_ERROR ||
821 x->km.state == XFRM_STATE_EXPIRED) {
822 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
823 security_xfrm_state_pol_flow_match(x, pol, fl))
830 if (!x && !error && !acquire_in_progress) {
/* A concrete SPI in the template that already exists elsewhere
 * means the policy can never be satisfied here. */
832 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
833 tmpl->id.proto, family)) != NULL) {
838 x = xfrm_state_alloc();
843 /* Initialize temporary selector matching only
844 * to current session. */
845 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
847 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
849 x->km.state = XFRM_STATE_DEAD;
855 if (km_query(x, tmpl, pol) == 0) {
856 x->km.state = XFRM_STATE_ACQ;
857 list_add(&x->km.all, &xfrm_state_all);
858 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
859 h = xfrm_src_hash(daddr, saddr, family);
860 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
862 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
863 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
865 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
866 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
867 add_timer(&x->timer);
869 xfrm_hash_grow_check(x->bydst.next != NULL);
871 x->km.state = XFRM_STATE_DEAD;
881 *err = acquire_in_progress ? -EAGAIN : error;
882 spin_unlock_bh(&xfrm_state_lock);
/* to_put is released outside the table lock to avoid lock inversion. */
884 xfrm_state_put(to_put);
/* Simplified lookup of a VALID SA by (addrs, family, mode, proto, reqid)
 * without policy/flow checks; takes a reference on the result (hold and
 * WILDRECV-preference lines were dropped by extraction). */
889 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
890 unsigned short family, u8 mode, u8 proto, u32 reqid)
893 struct xfrm_state *rx = NULL, *x = NULL;
894 struct hlist_node *entry;
896 spin_lock(&xfrm_state_lock);
897 h = xfrm_dst_hash(daddr, saddr, reqid, family);
898 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
899 if (x->props.family == family &&
900 x->props.reqid == reqid &&
901 !(x->props.flags & XFRM_STATE_WILDRECV) &&
902 xfrm_state_addr_check(x, daddr, saddr, family) &&
903 mode == x->props.mode &&
904 proto == x->id.proto &&
905 x->km.state == XFRM_STATE_VALID) {
913 spin_unlock(&xfrm_state_lock);
918 EXPORT_SYMBOL(xfrm_stateonly_find);
/* Insert a fully-formed SA into the walker list and all three hash
 * tables, stamp a fresh genid, and kick its timers.  Caller holds
 * xfrm_state_lock. */
920 static void __xfrm_state_insert(struct xfrm_state *x)
924 x->genid = ++xfrm_state_genid;
926 list_add(&x->km.all, &xfrm_state_all);
928 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
929 x->props.reqid, x->props.family);
930 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
932 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
933 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
936 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
939 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
/* Run the lifetime timer soon so expiries are computed immediately. */
942 mod_timer(&x->timer, jiffies + HZ);
943 if (x->replay_maxage)
944 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
950 xfrm_hash_grow_check(x->bydst.next != NULL);
953 /* xfrm_state_lock is held */
/* Stamp the current genid onto every existing SA that shares @xnew's
 * (family, reqid, daddr, saddr) so cached bundles referencing them are
 * invalidated when the new SA is inserted. */
954 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
956 unsigned short family = xnew->props.family;
957 u32 reqid = xnew->props.reqid;
958 struct xfrm_state *x;
959 struct hlist_node *entry;
962 h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
963 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
964 if (x->props.family == family &&
965 x->props.reqid == reqid &&
966 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
967 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
968 x->genid = xfrm_state_genid;
/* Public insert: bump genids of clashing SAs, then hash @x in. */
972 void xfrm_state_insert(struct xfrm_state *x)
974 spin_lock_bh(&xfrm_state_lock);
975 __xfrm_state_bump_genids(x);
976 __xfrm_state_insert(x);
977 spin_unlock_bh(&xfrm_state_lock);
979 EXPORT_SYMBOL(xfrm_state_insert);
981 /* xfrm_state_lock is held */
/* Find an existing larval (ACQ) state matching the tuple; if none and
 * @create is set, allocate one with a host-only selector (/32 or /128),
 * arm the acq-expire timer and hash it into bydst/bysrc (byspi is
 * skipped — larval states have no SPI yet).  Returns a referenced state
 * or NULL (hold/return lines were dropped by extraction). */
982 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
984 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
985 struct hlist_node *entry;
986 struct xfrm_state *x;
988 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
989 if (x->props.reqid != reqid ||
990 x->props.mode != mode ||
991 x->props.family != family ||
992 x->km.state != XFRM_STATE_ACQ ||
994 x->id.proto != proto)
999 if (x->id.daddr.a4 != daddr->a4 ||
1000 x->props.saddr.a4 != saddr->a4)
1004 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
1005 (struct in6_addr *)daddr) ||
1006 !ipv6_addr_equal((struct in6_addr *)
1008 (struct in6_addr *)saddr))
1020 x = xfrm_state_alloc();
/* IPv4 branch: exact-host selector. */
1024 x->sel.daddr.a4 = daddr->a4;
1025 x->sel.saddr.a4 = saddr->a4;
1026 x->sel.prefixlen_d = 32;
1027 x->sel.prefixlen_s = 32;
1028 x->props.saddr.a4 = saddr->a4;
1029 x->id.daddr.a4 = daddr->a4;
/* IPv6 branch: exact-host selector. */
1033 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1034 (struct in6_addr *)daddr);
1035 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1036 (struct in6_addr *)saddr);
1037 x->sel.prefixlen_d = 128;
1038 x->sel.prefixlen_s = 128;
1039 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1040 (struct in6_addr *)saddr);
1041 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1042 (struct in6_addr *)daddr);
1046 x->km.state = XFRM_STATE_ACQ;
1047 x->id.proto = proto;
1048 x->props.family = family;
1049 x->props.mode = mode;
1050 x->props.reqid = reqid;
1051 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
1053 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1054 add_timer(&x->timer);
1055 list_add(&x->km.all, &xfrm_state_all);
1056 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1057 h = xfrm_src_hash(daddr, saddr, family);
1058 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
1062 xfrm_hash_grow_check(x->bydst.next != NULL);
1068 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/* Add a new SA.  Fails with -EEXIST if an identical SA is present
 * (dropped lines); otherwise replaces the matching larval ACQ state,
 * which is deleted after the table lock is released. */
1070 int xfrm_state_add(struct xfrm_state *x)
1072 struct xfrm_state *x1, *to_put;
1075 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1077 family = x->props.family;
1081 spin_lock_bh(&xfrm_state_lock);
1083 x1 = __xfrm_state_locate(x, use_spi, family);
/* If the KM supplied a seq, prefer the ACQ state it refers to, but
 * only when its identity actually matches ours. */
1091 if (use_spi && x->km.seq) {
1092 x1 = __xfrm_find_acq_byseq(x->km.seq);
1093 if (x1 && ((x1->id.proto != x->id.proto) ||
1094 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1101 x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
1103 &x->id.daddr, &x->props.saddr, 0);
1105 __xfrm_state_bump_genids(x);
1106 __xfrm_state_insert(x);
1110 spin_unlock_bh(&xfrm_state_lock);
1113 xfrm_state_delete(x1);
1118 xfrm_state_put(to_put);
1122 EXPORT_SYMBOL(xfrm_state_add);
1124 #ifdef CONFIG_XFRM_MIGRATE
/* Deep-copy an SA for migration: identity, selector, lifetimes, algs
 * (via xfrm_algo_clone), encap and coaddr (kmemdup), then re-run
 * xfrm_init_state() on the copy.  Error paths (kfree + *errp) were
 * dropped by extraction; tail of the function is also truncated. */
1125 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1128 struct xfrm_state *x = xfrm_state_alloc();
1132 memcpy(&x->id, &orig->id, sizeof(x->id));
1133 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1134 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1135 x->props.mode = orig->props.mode;
1136 x->props.replay_window = orig->props.replay_window;
1137 x->props.reqid = orig->props.reqid;
1138 x->props.family = orig->props.family;
1139 x->props.saddr = orig->props.saddr;
1142 x->aalg = xfrm_algo_clone(orig->aalg);
1146 x->props.aalgo = orig->props.aalgo;
1149 x->ealg = xfrm_algo_clone(orig->ealg);
1153 x->props.ealgo = orig->props.ealgo;
1156 x->calg = xfrm_algo_clone(orig->calg);
1160 x->props.calgo = orig->props.calgo;
1163 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1169 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1175 err = xfrm_init_state(x);
1179 x->props.flags = orig->props.flags;
1181 x->curlft.add_time = orig->curlft.add_time;
1182 x->km.state = orig->km.state;
1183 x->km.seq = orig->km.seq;
1201 /* xfrm_state_lock is held */
/* Locate the SA to migrate: first by old (daddr,saddr,reqid) in bydst,
 * then — for reqid-less matches — by the old address pair in bysrc.
 * Takes a reference on the hit (hold/return lines dropped). */
1202 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1205 struct xfrm_state *x;
1206 struct hlist_node *entry;
1209 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
1210 m->reqid, m->old_family);
1211 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1212 if (x->props.mode != m->mode ||
1213 x->id.proto != m->proto)
1215 if (m->reqid && x->props.reqid != m->reqid)
1217 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1219 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1226 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
1228 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
1229 if (x->props.mode != m->mode ||
1230 x->id.proto != m->proto)
1232 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1234 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1244 EXPORT_SYMBOL(xfrm_migrate_state_find);
/* Clone @x and retarget the clone at the migration's new endpoints.
 * If the destination address is unchanged, a plain insert suffices;
 * otherwise go through xfrm_state_add() so the identity change is
 * validated (error/free paths were dropped by extraction). */
1246 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1247 struct xfrm_migrate *m)
1249 struct xfrm_state *xc;
1252 xc = xfrm_state_clone(x, &err);
1256 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1257 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1260 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1261 /* a care is needed when the destination address of the
1262 state is to be updated as it is a part of triplet */
1263 xfrm_state_insert(xc);
1265 if ((err = xfrm_state_add(xc)) < 0)
1274 EXPORT_SYMBOL(xfrm_state_migrate);
/* Update an existing SA in place.  Kernel-held (tunnel) SAs refuse the
 * update; an ACQ match is replaced outright; otherwise encap, coaddr,
 * selector (non-SPI protos only) and lifetimes are copied onto the live
 * SA under its own lock (several error/label lines were dropped). */
1277 int xfrm_state_update(struct xfrm_state *x)
1279 struct xfrm_state *x1, *to_put;
1281 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1285 spin_lock_bh(&xfrm_state_lock);
1286 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1292 if (xfrm_state_kern(x1)) {
1298 if (x1->km.state == XFRM_STATE_ACQ) {
1299 __xfrm_state_insert(x);
1305 spin_unlock_bh(&xfrm_state_lock);
1308 xfrm_state_put(to_put);
/* The incoming descriptor @x is transient; the old larval x1 dies. */
1314 xfrm_state_delete(x1);
1320 spin_lock_bh(&x1->lock);
1321 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1322 if (x->encap && x1->encap)
1323 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1324 if (x->coaddr && x1->coaddr) {
1325 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1327 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1328 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1329 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1332 mod_timer(&x1->timer, jiffies + HZ);
1333 if (x1->curlft.use_time)
1334 xfrm_state_check_expire(x1);
1338 spin_unlock_bh(&x1->lock);
1344 EXPORT_SYMBOL(xfrm_state_update);
/* Datapath lifetime check: stamp first use, hard byte/packet limits
 * expire the SA immediately (timer fired now), soft limits only notify
 * the key manager.  Return lines were dropped by extraction. */
1346 int xfrm_state_check_expire(struct xfrm_state *x)
1348 if (!x->curlft.use_time)
1349 x->curlft.use_time = get_seconds();
1351 if (x->km.state != XFRM_STATE_VALID)
1354 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1355 x->curlft.packets >= x->lft.hard_packet_limit) {
1356 x->km.state = XFRM_STATE_EXPIRED;
1357 mod_timer(&x->timer, jiffies);
1362 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1363 x->curlft.packets >= x->lft.soft_packet_limit)) {
1365 km_state_expired(x, 0, 0);
1369 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Locked public wrapper around __xfrm_state_lookup() (by SPI). */
1372 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1373 unsigned short family)
1375 struct xfrm_state *x;
1377 spin_lock_bh(&xfrm_state_lock);
1378 x = __xfrm_state_lookup(daddr, spi, proto, family);
1379 spin_unlock_bh(&xfrm_state_lock);
1382 EXPORT_SYMBOL(xfrm_state_lookup);
/* Locked public wrapper around __xfrm_state_lookup_byaddr(). */
1385 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1386 u8 proto, unsigned short family)
1388 struct xfrm_state *x;
1390 spin_lock_bh(&xfrm_state_lock);
1391 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1392 spin_unlock_bh(&xfrm_state_lock);
1395 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
/* Locked public wrapper around __find_acq_core(): find (or create, if
 * @create) a larval ACQ state for the tuple. */
1398 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1399 xfrm_address_t *daddr, xfrm_address_t *saddr,
1400 int create, unsigned short family)
1402 struct xfrm_state *x;
1404 spin_lock_bh(&xfrm_state_lock);
1405 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1406 spin_unlock_bh(&xfrm_state_lock);
1410 EXPORT_SYMBOL(xfrm_find_acq);
1412 #ifdef CONFIG_XFRM_SUB_POLICY
/* Sort policy templates via the per-family hook (only IPv6 defines
 * one); no-op success when the hook is absent. */
1414 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1415 unsigned short family)
1418 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1420 return -EAFNOSUPPORT;
1422 spin_lock_bh(&xfrm_state_lock);
1423 if (afinfo->tmpl_sort)
1424 err = afinfo->tmpl_sort(dst, src, n);
1425 spin_unlock_bh(&xfrm_state_lock);
1426 xfrm_state_put_afinfo(afinfo);
1429 EXPORT_SYMBOL(xfrm_tmpl_sort);
/* Same as xfrm_tmpl_sort() but for resolved states. */
1432 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1433 unsigned short family)
1436 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1438 return -EAFNOSUPPORT;
1440 spin_lock_bh(&xfrm_state_lock);
1441 if (afinfo->state_sort)
1442 err = afinfo->state_sort(dst, src, n);
1443 spin_unlock_bh(&xfrm_state_lock);
1444 xfrm_state_put_afinfo(afinfo);
1447 EXPORT_SYMBOL(xfrm_state_sort);
1450 /* Silly enough, but I'm lazy to build resolution list */
/* Linear scan of every bydst bucket for the ACQ state with a given KM
 * sequence number; takes a reference on the hit (dropped hold line).
 * Caller holds xfrm_state_lock. */
1452 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1456 for (i = 0; i <= xfrm_state_hmask; i++) {
1457 struct hlist_node *entry;
1458 struct xfrm_state *x;
1460 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1461 if (x->km.seq == seq &&
1462 x->km.state == XFRM_STATE_ACQ) {
/* Locked public wrapper around __xfrm_find_acq_byseq(). */
1471 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1473 struct xfrm_state *x;
1475 spin_lock_bh(&xfrm_state_lock);
1476 x = __xfrm_find_acq_byseq(seq);
1477 spin_unlock_bh(&xfrm_state_lock);
1480 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Hand out the next nonzero acquire sequence number; '++a ?: ++a'
 * skips 0 on wraparound.  The static counter's declaration line was
 * dropped by extraction. */
1482 u32 xfrm_get_acqseq(void)
1486 static DEFINE_SPINLOCK(acqseq_lock);
1488 spin_lock_bh(&acqseq_lock);
1489 res = (++acqseq ? : ++acqseq);
1490 spin_unlock_bh(&acqseq_lock);
1493 EXPORT_SYMBOL(xfrm_get_acqseq);
/* Assign an SPI to @x from [low, high].  Equal bounds request that
 * exact SPI; otherwise up to (high-low+1) random probes look for an
 * unused value.  On success the state is hashed into byspi.  Holds
 * x->lock throughout; error paths and the IPCOMP special case were
 * dropped by extraction. */
1495 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1498 struct xfrm_state *x0;
1500 __be32 minspi = htonl(low);
1501 __be32 maxspi = htonl(high);
1503 spin_lock_bh(&x->lock);
1504 if (x->km.state == XFRM_STATE_DEAD)
1513 if (minspi == maxspi) {
1514 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
1522 for (h=0; h<high-low+1; h++) {
1523 spi = low + net_random()%(high-low+1);
/* NOTE(review): a lookup hit leaks a reference unless a dropped
 * line put it; a miss claims this spi. */
1524 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1526 x->id.spi = htonl(spi);
1533 spin_lock_bh(&xfrm_state_lock);
1534 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1535 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
1536 spin_unlock_bh(&xfrm_state_lock);
1542 spin_unlock_bh(&x->lock);
1546 EXPORT_SYMBOL(xfrm_alloc_spi);
/* Resumable SAD dump: iterate xfrm_state_all from the walker's cursor,
 * invoking @func on each live state matching walk->proto.  On a nonzero
 * callback return the cursor is parked (list_move_tail) so the next
 * call resumes after the current entry; a finished walk unlinks itself. */
1548 int xfrm_state_walk(struct xfrm_state_walk *walk,
1549 int (*func)(struct xfrm_state *, int, void*),
1552 struct xfrm_state *state;
1553 struct xfrm_state_walk *x;
/* A started walk whose cursor was already unlinked is complete. */
1556 if (walk->seq != 0 && list_empty(&walk->all))
1559 spin_lock_bh(&xfrm_state_lock);
1560 if (list_empty(&walk->all))
1561 x = list_first_entry(&xfrm_state_all, struct xfrm_state_walk, all);
1563 x = list_entry(&walk->all, struct xfrm_state_walk, all);
1564 list_for_each_entry_from(x, &xfrm_state_all, all) {
/* Skip walker cursors of other dumps (they carry state DEAD). */
1565 if (x->state == XFRM_STATE_DEAD)
1567 state = container_of(x, struct xfrm_state, km);
1568 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
1570 err = func(state, walk->seq, data);
1572 list_move_tail(&walk->all, &x->all);
1577 if (walk->seq == 0) {
1581 list_del_init(&walk->all);
1583 spin_unlock_bh(&xfrm_state_lock);
1586 EXPORT_SYMBOL(xfrm_state_walk);
/* Prepare a walker.  XFRM_STATE_DEAD marks it as a placeholder entry
 * so xfrm_state_walk() skips it when scanning xfrm_state_all. */
1588 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
1590 INIT_LIST_HEAD(&walk->all);
1591 walk->proto = proto;
1592 walk->state = XFRM_STATE_DEAD;
1595 EXPORT_SYMBOL(xfrm_state_walk_init);
/* Tear down a state walk: unlink the walker's placeholder node from
 * xfrm_state_all.  A walker that never ran (node still unlinked)
 * needs no work. */
1597 void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1599 if (list_empty(&walk->all))
1602 spin_lock_bh(&xfrm_state_lock);
1603 list_del(&walk->all);
/* Fix: must RELEASE xfrm_state_lock here; the original code called
 * spin_lock_bh() a second time, self-deadlocking on this spinlock. */
1604 spin_unlock_bh(&xfrm_state_lock);
1606 EXPORT_SYMBOL(xfrm_state_walk_done);
1609 void xfrm_replay_notify(struct xfrm_state *x, int event)
1612 /* we send notify messages in case
1613 * 1. we updated one of the sequence numbers, and the seqno difference
1614 * is at least x->replay_maxdiff, in this case we also update the
1615 * timeout of our timer function
1616 * 2. if x->replay_maxage has elapsed since last update,
1617 * and there were changes
1619 * The state structure must be locked!
1623 case XFRM_REPLAY_UPDATE:
/* Both directions still below the threshold: nothing urgent.  If the
 * timer previously deferred, demote to a TIMEOUT-style notification. */
1624 if (x->replay_maxdiff &&
1625 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
1626 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
1627 if (x->xflags & XFRM_TIME_DEFER)
1628 event = XFRM_REPLAY_TIMEOUT;
1635 case XFRM_REPLAY_TIMEOUT:
/* Nothing changed since the last notification: defer again. */
1636 if ((x->replay.seq == x->preplay.seq) &&
1637 (x->replay.bitmap == x->preplay.bitmap) &&
1638 (x->replay.oseq == x->preplay.oseq)) {
1639 x->xflags |= XFRM_TIME_DEFER;
/* Snapshot the current counters and emit the async event. */
1646 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
1647 c.event = XFRM_MSG_NEWAE;
1648 c.data.aevent = event;
1649 km_state_notify(x, &c);
/* Re-arm the replay timer; clear DEFER only if it was not pending. */
1651 if (x->replay_maxage &&
1652 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
1653 x->xflags &= ~XFRM_TIME_DEFER;
/* Replay timer callback: flush a deferred replay notification while
 * the state is still valid; otherwise remember that one is pending. */
1656 static void xfrm_replay_timer_handler(unsigned long data)
1658 struct xfrm_state *x = (struct xfrm_state*)data;
1660 spin_lock(&x->lock);
1662 if (x->km.state == XFRM_STATE_VALID) {
1663 if (xfrm_aevent_is_on())
1664 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1666 x->xflags |= XFRM_TIME_DEFER;
1669 spin_unlock(&x->lock);
/* Validate an inbound sequence number against the anti-replay window.
 * Returns 0 when acceptable; replays are audited and rejected. */
1672 int xfrm_replay_check(struct xfrm_state *x,
1673 struct sk_buff *skb, __be32 net_seq)
1676 u32 seq = ntohl(net_seq);
/* Sequence number 0 is never valid on the wire. */
1678 if (unlikely(seq == 0))
/* Anything ahead of the highest seen seq is accepted. */
1681 if (likely(seq > x->replay.seq))
1684 diff = x->replay.seq - seq;
/* Window is capped at the bitmap width (bits in replay.bitmap). */
1685 if (diff >= min_t(unsigned int, x->props.replay_window,
1686 sizeof(x->replay.bitmap) * 8)) {
1687 x->stats.replay_window++;
/* Bit already set: this sequence number was seen before — a replay. */
1691 if (x->replay.bitmap & (1U << diff)) {
1698 xfrm_audit_state_replay(x, skb, net_seq);
/* Record net_seq in the replay window after successful verification. */
1702 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1705 u32 seq = ntohl(net_seq);
1707 if (seq > x->replay.seq) {
1708 diff = seq - x->replay.seq;
/* Shift the window forward; a jump past the window resets the bitmap
 * to just the new packet's bit. */
1709 if (diff < x->props.replay_window)
1710 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1712 x->replay.bitmap = 1;
1713 x->replay.seq = seq;
/* Older-but-in-window packet: just mark its bit. */
1715 diff = x->replay.seq - seq;
1716 x->replay.bitmap |= (1U << diff);
1719 if (xfrm_aevent_is_on())
1720 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
/* Registered key managers and the rwlock guarding the list. */
1723 static LIST_HEAD(xfrm_km_list);
1724 static DEFINE_RWLOCK(xfrm_km_lock);
/* Broadcast a policy event to every key manager with a policy hook. */
1726 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1728 struct xfrm_mgr *km;
1730 read_lock(&xfrm_km_lock);
1731 list_for_each_entry(km, &xfrm_km_list, list)
1732 if (km->notify_policy)
1733 km->notify_policy(xp, dir, c);
1734 read_unlock(&xfrm_km_lock);
/* Broadcast a state event to every registered key manager. */
1737 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1739 struct xfrm_mgr *km;
1740 read_lock(&xfrm_km_lock);
1741 list_for_each_entry(km, &xfrm_km_list, list)
1744 read_unlock(&xfrm_km_lock);
1747 EXPORT_SYMBOL(km_policy_notify);
1748 EXPORT_SYMBOL(km_state_notify);
/* Notify key managers that a state soft/hard-expired (XFRM_MSG_EXPIRE). */
1750 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1756 c.event = XFRM_MSG_EXPIRE;
1757 km_state_notify(x, &c);
1763 EXPORT_SYMBOL(km_state_expired);
1765 * We send to all registered managers regardless of failure
1766 * We are happy with one success
/* Ask every key manager to acquire an SA for template t under pol. */
1768 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1770 int err = -EINVAL, acqret;
1771 struct xfrm_mgr *km;
1773 read_lock(&xfrm_km_lock);
1774 list_for_each_entry(km, &xfrm_km_list, list) {
1775 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1779 read_unlock(&xfrm_km_lock);
1782 EXPORT_SYMBOL(km_query);
/* Inform key managers of a NAT-T peer address/port change for x. */
1784 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1787 struct xfrm_mgr *km;
1789 read_lock(&xfrm_km_lock);
1790 list_for_each_entry(km, &xfrm_km_list, list) {
1791 if (km->new_mapping)
1792 err = km->new_mapping(x, ipaddr, sport);
1796 read_unlock(&xfrm_km_lock);
1799 EXPORT_SYMBOL(km_new_mapping);
/* Notify key managers that a policy soft/hard-expired (XFRM_MSG_POLEXPIRE). */
1801 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1807 c.event = XFRM_MSG_POLEXPIRE;
1808 km_policy_notify(pol, dir, &c);
1813 EXPORT_SYMBOL(km_policy_expired);
1815 #ifdef CONFIG_XFRM_MIGRATE
/* Ask each key manager providing a migrate hook to move the SAs
 * described by m (num_migrate entries) for selector sel. */
1816 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1817 struct xfrm_migrate *m, int num_migrate)
1821 struct xfrm_mgr *km;
1823 read_lock(&xfrm_km_lock);
1824 list_for_each_entry(km, &xfrm_km_list, list) {
1826 ret = km->migrate(sel, dir, type, m, num_migrate);
1831 read_unlock(&xfrm_km_lock);
1834 EXPORT_SYMBOL(km_migrate);
/* Forward a report event to each key manager that has a report hook. */
1837 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1841 struct xfrm_mgr *km;
1843 read_lock(&xfrm_km_lock);
1844 list_for_each_entry(km, &xfrm_km_list, list) {
1846 ret = km->report(proto, sel, addr);
1851 read_unlock(&xfrm_km_lock);
1854 EXPORT_SYMBOL(km_report);
/* setsockopt() path: copy a user-supplied policy blob into the kernel,
 * let a key manager compile it, and attach the result to the socket. */
1856 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1860 struct xfrm_mgr *km;
1861 struct xfrm_policy *pol = NULL;
/* Reject empty or oversized option buffers before allocating. */
1863 if (optlen <= 0 || optlen > PAGE_SIZE)
1866 data = kmalloc(optlen, GFP_KERNEL);
1871 if (copy_from_user(data, optval, optlen))
/* First manager whose compile_policy succeeds wins. */
1875 read_lock(&xfrm_km_lock);
1876 list_for_each_entry(km, &xfrm_km_list, list) {
1877 pol = km->compile_policy(sk, optname, data,
1882 read_unlock(&xfrm_km_lock);
1885 xfrm_sk_policy_insert(sk, err, pol);
1894 EXPORT_SYMBOL(xfrm_user_policy);
/* Add a key manager to the global list (write-locked, BH-safe). */
1896 int xfrm_register_km(struct xfrm_mgr *km)
1898 write_lock_bh(&xfrm_km_lock);
1899 list_add_tail(&km->list, &xfrm_km_list);
1900 write_unlock_bh(&xfrm_km_lock);
1903 EXPORT_SYMBOL(xfrm_register_km);
/* Remove a previously registered key manager from the global list. */
1905 int xfrm_unregister_km(struct xfrm_mgr *km)
1907 write_lock_bh(&xfrm_km_lock);
1908 list_del(&km->list);
1909 write_unlock_bh(&xfrm_km_lock);
1912 EXPORT_SYMBOL(xfrm_unregister_km);
/* Install the per-family state afinfo entry.  Fails for a NULL pointer,
 * an out-of-range family, or an already-occupied slot. */
1914 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1917 if (unlikely(afinfo == NULL))
1919 if (unlikely(afinfo->family >= NPROTO))
1920 return -EAFNOSUPPORT;
1921 write_lock_bh(&xfrm_state_afinfo_lock);
1922 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1925 xfrm_state_afinfo[afinfo->family] = afinfo;
1926 write_unlock_bh(&xfrm_state_afinfo_lock);
1929 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/* Remove the per-family afinfo entry; only the currently registered
 * owner may clear its slot. */
1931 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1934 if (unlikely(afinfo == NULL))
1936 if (unlikely(afinfo->family >= NPROTO))
1937 return -EAFNOSUPPORT;
1938 write_lock_bh(&xfrm_state_afinfo_lock);
1939 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
/* A different afinfo in the slot means a mismatched unregister. */
1940 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1943 xfrm_state_afinfo[afinfo->family] = NULL;
1945 write_unlock_bh(&xfrm_state_afinfo_lock);
1948 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the afinfo for family.  On success the read lock is KEPT
 * held; the caller must release it with xfrm_state_put_afinfo(). */
1950 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1952 struct xfrm_state_afinfo *afinfo;
1953 if (unlikely(family >= NPROTO))
1955 read_lock(&xfrm_state_afinfo_lock);
1956 afinfo = xfrm_state_afinfo[family];
/* Unlock only on the failure path — see contract above. */
1957 if (unlikely(!afinfo))
1958 read_unlock(&xfrm_state_afinfo_lock);
/* Release the read lock acquired by a successful xfrm_state_get_afinfo(). */
1962 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1963 __releases(xfrm_state_afinfo_lock)
1965 read_unlock(&xfrm_state_afinfo_lock);
1968 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop x's reference on its tunnel state t, deleting t when x holds
 * the last user reference besides t's own. */
1969 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1972 struct xfrm_state *t = x->tunnel;
/* NOTE(review): the == 2 threshold presumably means "only x and t
 * itself remain as users" — confirm against tunnel_users accounting. */
1974 if (atomic_read(&t->tunnel_users) == 2)
1975 xfrm_state_delete(t);
1976 atomic_dec(&t->tunnel_users);
1981 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
/* Compute the usable payload MTU for x: ask the transform type's
 * get_mtu() when available, else subtract the header overhead. */
1983 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1987 spin_lock_bh(&x->lock);
1988 if (x->km.state == XFRM_STATE_VALID &&
1989 x->type && x->type->get_mtu)
1990 res = x->type->get_mtu(x, mtu);
1992 res = mtu - x->props.header_len;
1993 spin_unlock_bh(&x->lock);
/* Finish constructing a state: apply AF-specific init flags, resolve
 * inner/outer modes and the transform type, then mark the state VALID. */
1997 int xfrm_init_state(struct xfrm_state *x)
1999 struct xfrm_state_afinfo *afinfo;
2000 struct xfrm_mode *inner_mode;
2001 int family = x->props.family;
2004 err = -EAFNOSUPPORT;
2005 afinfo = xfrm_state_get_afinfo(family);
2010 if (afinfo->init_flags)
2011 err = afinfo->init_flags(x);
2013 xfrm_state_put_afinfo(afinfo);
2018 err = -EPROTONOSUPPORT;
/* Concrete selector family: a single inner mode suffices. */
2020 if (x->sel.family != AF_UNSPEC) {
2021 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2022 if (inner_mode == NULL)
/* Cross-family inner/outer is only legal for tunnel-flagged modes. */
2025 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2026 family != x->sel.family) {
2027 xfrm_put_mode(inner_mode);
2031 x->inner_mode = inner_mode;
/* AF_UNSPEC selector: keep both the v4 and v6 inner modes so either
 * inner family can be carried; both must be tunnel modes. */
2033 struct xfrm_mode *inner_mode_iaf;
2035 inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
2036 if (inner_mode == NULL)
2039 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2040 xfrm_put_mode(inner_mode);
2044 inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
2045 if (inner_mode_iaf == NULL)
2048 if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
2049 xfrm_put_mode(inner_mode_iaf);
/* inner_mode matches props.family; inner_mode_iaf is the other AF. */
2053 if (x->props.family == AF_INET) {
2054 x->inner_mode = inner_mode;
2055 x->inner_mode_iaf = inner_mode_iaf;
2057 x->inner_mode = inner_mode_iaf;
2058 x->inner_mode_iaf = inner_mode;
2062 x->type = xfrm_get_type(x->id.proto, family);
2063 if (x->type == NULL)
2066 err = x->type->init_state(x);
2070 x->outer_mode = xfrm_get_mode(x->props.mode, family);
2071 if (x->outer_mode == NULL)
2074 x->km.state = XFRM_STATE_VALID;
2080 EXPORT_SYMBOL(xfrm_init_state);
/* Boot-time init: allocate the bydst/bysrc/byspi hash tables (8 slots
 * each initially) and set up the state GC work item. */
2082 void __init xfrm_state_init(void)
2086 sz = sizeof(struct hlist_head) * 8;
2088 xfrm_state_bydst = xfrm_hash_alloc(sz);
2089 xfrm_state_bysrc = xfrm_hash_alloc(sz);
2090 xfrm_state_byspi = xfrm_hash_alloc(sz);
2091 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
2092 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
2093 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2095 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
2098 #ifdef CONFIG_AUDITSYSCALL
/* Append x's security context, src/dst addresses and SPI to an audit
 * record, formatted per address family. */
2099 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2100 struct audit_buffer *audit_buf)
2102 struct xfrm_sec_ctx *ctx = x->security;
2103 u32 spi = ntohl(x->id.spi);
2106 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2107 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2109 switch(x->props.family) {
2111 audit_log_format(audit_buf,
2112 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2113 NIPQUAD(x->props.saddr.a4),
2114 NIPQUAD(x->id.daddr.a4));
2117 audit_log_format(audit_buf,
2118 " src=" NIP6_FMT " dst=" NIP6_FMT,
2119 NIP6(*(struct in6_addr *)x->props.saddr.a6),
2120 NIP6(*(struct in6_addr *)x->id.daddr.a6));
2124 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
/* Append the packet's src/dst addresses (and the IPv6 flow label) to
 * an audit record, switching on the given address family. */
2127 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2128 struct audit_buffer *audit_buf)
2131 struct ipv6hdr *iph6;
2136 audit_log_format(audit_buf,
2137 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2138 NIPQUAD(iph4->saddr),
2139 NIPQUAD(iph4->daddr));
2142 iph6 = ipv6_hdr(skb);
2143 audit_log_format(audit_buf,
2144 " src=" NIP6_FMT " dst=" NIP6_FMT
2145 " flowlbl=0x%x%02x%02x",
2148 iph6->flow_lbl[0] & 0x0f,
/* Audit an SAD-add operation with the acting user's credentials. */
2155 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2156 uid_t auid, u32 sessionid, u32 secid)
2158 struct audit_buffer *audit_buf;
2160 audit_buf = xfrm_audit_start("SAD-add");
2161 if (audit_buf == NULL)
2163 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2164 xfrm_audit_helper_sainfo(x, audit_buf);
2165 audit_log_format(audit_buf, " res=%u", result);
2166 audit_log_end(audit_buf);
2168 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
/* Audit an SAD-delete operation with the acting user's credentials. */
2170 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2171 uid_t auid, u32 sessionid, u32 secid)
2173 struct audit_buffer *audit_buf;
2175 audit_buf = xfrm_audit_start("SAD-delete");
2176 if (audit_buf == NULL)
2178 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2179 xfrm_audit_helper_sainfo(x, audit_buf);
2180 audit_log_format(audit_buf, " res=%u", result);
2181 audit_log_end(audit_buf);
2183 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
/* Audit a sequence-number counter overflow on an outbound SA. */
2185 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2186 struct sk_buff *skb)
2188 struct audit_buffer *audit_buf;
2191 audit_buf = xfrm_audit_start("SA-replay-overflow");
2192 if (audit_buf == NULL)
2194 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2195 /* don't record the sequence number because it's inherent in this kind
2196 * of audit message */
2197 spi = ntohl(x->id.spi);
2198 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2199 audit_log_end(audit_buf);
2201 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
/* Audit a replayed inbound packet, recording SPI and sequence number. */
2203 static void xfrm_audit_state_replay(struct xfrm_state *x,
2204 struct sk_buff *skb, __be32 net_seq)
2206 struct audit_buffer *audit_buf;
2209 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2210 if (audit_buf == NULL)
2212 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2213 spi = ntohl(x->id.spi);
2214 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2215 spi, spi, ntohl(net_seq));
2216 audit_log_end(audit_buf);
/* Audit an SA-lookup failure when no SPI/seq info is available. */
2219 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2221 struct audit_buffer *audit_buf;
2223 audit_buf = xfrm_audit_start("SA-notfound");
2224 if (audit_buf == NULL)
2226 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2227 audit_log_end(audit_buf);
2229 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
/* Audit an SA-lookup failure, recording the packet's SPI and seq. */
2231 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2232 __be32 net_spi, __be32 net_seq)
2234 struct audit_buffer *audit_buf;
2237 audit_buf = xfrm_audit_start("SA-notfound");
2238 if (audit_buf == NULL)
2240 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2241 spi = ntohl(net_spi);
2242 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2243 spi, spi, ntohl(net_seq));
2244 audit_log_end(audit_buf);
2246 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
/* Audit an integrity-check (ICV) failure; SPI/seq are added only when
 * they can be parsed out of the packet. */
2248 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2249 struct sk_buff *skb, u8 proto)
2251 struct audit_buffer *audit_buf;
2255 audit_buf = xfrm_audit_start("SA-icv-failure");
2256 if (audit_buf == NULL)
2258 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2259 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2260 u32 spi = ntohl(net_spi);
2261 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2262 spi, spi, ntohl(net_seq));
2264 audit_log_end(audit_buf);
2266 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2267 #endif /* CONFIG_AUDITSYSCALL */