/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>

static LIST_HEAD(bpf_map_types);
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}
/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
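/* A map implementation would typically register itself from an initcall.
 * The following is an illustrative sketch only: the "example" map type, ops
 * and initcall names are hypothetical; only the .type/.ops/.list_node usage
 * and the ops method names are implied by this file:
 *
 *	static const struct bpf_map_ops example_map_ops = {
 *		.map_alloc = example_map_alloc,
 *		.map_free = example_map_free,
 *		.map_lookup_elem = example_map_lookup_elem,
 *		.map_update_elem = example_map_update_elem,
 *		.map_delete_elem = example_map_delete_elem,
 *		.map_get_next_key = example_map_get_next_key,
 *	};
 *
 *	static struct bpf_map_type_list example_map_type __read_mostly = {
 *		.ops = &example_map_ops,
 *		.type = BPF_MAP_TYPE_UNSPEC,
 *	};
 *
 *	static int __init register_example_map(void)
 *	{
 *		bpf_register_map_type(&example_map_type);
 *		return 0;
 *	}
 *	late_initcall(register_example_map);
 */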
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	/* implementation dependent freeing */
	map->ops->map_free(map);
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	bpf_map_put(map);
	return 0;
}

static const struct file_operations bpf_map_fops = {
	.release = bpf_map_release,
};
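/* A map created via map_create() below is exposed to user space as an
 * anonymous-inode file descriptor. Closing that fd drops one reference
 * through bpf_map_put(); eBPF programs that use the map hold their own
 * references (dropped in free_used_maps()), so the map stays alive while
 * such programs are loaded even after the creating fd is closed.
 */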
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
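/* For example, with BPF_MAP_CREATE_LAST_FIELD defined as max_entries below,
 * CHECK_ATTR(BPF_MAP_CREATE) scans every byte of 'union bpf_attr' that lies
 * after 'max_entries' and evaluates to true if any of them is non-zero,
 * i.e. if user space set attribute fields this command does not know about.
 */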
#define BPF_MAP_CREATE_LAST_FIELD max_entries
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);

	err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC);
	if (err < 0) {
		/* failed to allocate fd */
		map->ops->map_free(map);
		return err;
	}

	return err;
}
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *bpf_map_get(struct fd f)
{
	struct bpf_map *map;

	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	map = f.file->private_data;
	return map;
}
/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}
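/* User pointers are carried in 'union bpf_attr' as __aligned_u64 rather than
 * as raw pointers so that the attribute layout is identical for 32-bit and
 * 64-bit user space; the intermediate cast through 'unsigned long' narrows
 * the value to the native pointer size without a compiler warning.
 */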
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *value, *ptr;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;
	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;
	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER);
	if (!value)
		goto free_key;

	rcu_read_lock();
	ptr = map->ops->map_lookup_elem(map, key);
	if (ptr)
		memcpy(value, ptr, map->value_size);
	rcu_read_unlock();

	err = -ENOENT;
	if (!ptr)
		goto free_value;
	err = -EFAULT;
	if (copy_to_user(uvalue, value, map->value_size) != 0)
		goto free_value;
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *value;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;
	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;
	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER);
	if (!value)
		goto free_key;
	err = -EFAULT;
	if (copy_from_user(value, uvalue, map->value_size) != 0)
		goto free_value;

	/* eBPF programs that use maps run under rcu_read_lock(), and all map
	 * accessors rely on that fact, so do the same here
	 */
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value, attr->flags);
	rcu_read_unlock();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
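/* attr->flags selects the update semantics. In this version of the uapi the
 * flag values defined for this command are BPF_ANY (create or update an
 * element), BPF_NOEXIST (create a new element only) and BPF_EXIST (update an
 * existing element only); enforcing them is left to the individual map
 * implementation's ->map_update_elem().
 */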
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;
	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *next_key;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;
	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;
	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
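/* User space can walk all keys of a map by calling BPF_MAP_GET_NEXT_KEY in a
 * loop, passing each returned next_key back in as the key of the following
 * call, until the map implementation reports that there is no further key
 * (typically by returning -ENOENT).
 */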
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			return 0;
		}
	}
	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call
			 * instructions and it passed bpf_check(), which means
			 * ops->get_func_proto must have been supplied; check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
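/* Storing the helper address as an offset relative to __bpf_call_base keeps
 * the 32-bit imm field sufficient on 64-bit kernels: at run time the
 * interpreter (or JIT) recomputes the target as __bpf_call_base + insn->imm.
 */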
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		free_used_maps(prog->aux);
		bpf_prog_free(prog);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

static struct bpf_prog *get_prog(struct fd f)
{
	struct bpf_prog *prog;

	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	prog = f.file->private_data;
	return prog;
}
/* called by sockets/tracing/seccomp before attaching program to an event
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = get_prog(f);
	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD log_buf

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	bpf_prog_select_runtime(prog);

	err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	/* program is still referencing maps it grabbed during bpf_check() */
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_free(prog);
	return err;
}
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	/* the syscall is limited to root temporarily. This restriction will be
	 * lifted when security audit is clean. Note that eBPF+tracing must have
	 * this restriction, since it may pass kernel data to user space
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
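/* Minimal user-space sketch of driving this syscall to create a map. It
 * assumes the uapi <linux/bpf.h> header and a registered map type such as
 * BPF_MAP_TYPE_HASH; the wrapper name bpf_create_map() is illustrative only
 * and not part of this file:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int bpf_create_map(enum bpf_map_type type, int key_size,
 *				  int value_size, int max_entries)
 *	{
 *		union bpf_attr attr = {
 *			.map_type = type,
 *			.key_size = key_size,
 *			.value_size = value_size,
 *			.max_entries = max_entries,
 *		};
 *
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */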