/* Source: net/sched/cls_matchall.c (cascardo/linux.git snapshot) */
1 /*
 * net/sched/cls_matchall.c             Match-all classifier
3  *
4  * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15
16 #include <net/sch_generic.h>
17 #include <net/pkt_cls.h>
18
/* The single filter a matchall instance can hold. Freed via call_rcu()
 * (see mall_destroy_filter()) so concurrent classify under RCU stays safe.
 */
struct cls_mall_filter {
	struct tcf_exts exts;	/* actions executed on every matching packet */
	struct tcf_result res;	/* classification result; classid set from
				 * TCA_MATCHALL_CLASSID in mall_set_parms() */
	u32 handle;		/* userspace-visible handle (defaults to 1) */
	struct rcu_head rcu;	/* for deferred free via call_rcu() */
	u32 flags;		/* offload control flags, validated with
				 * tc_flags_valid() in mall_change() */
};
26
/* Per-tcf_proto root state: at most one filter at a time (NULL when none
 * has been configured yet). Freed with kfree_rcu() in mall_destroy().
 */
struct cls_mall_head {
	struct cls_mall_filter *filter;	/* RCU-protected; NULL if unset */
	struct rcu_head rcu;		/* for deferred free via kfree_rcu() */
};
31
32 static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
33                          struct tcf_result *res)
34 {
35         struct cls_mall_head *head = rcu_dereference_bh(tp->root);
36         struct cls_mall_filter *f = head->filter;
37
38         if (tc_skip_sw(f->flags))
39                 return -1;
40
41         return tcf_exts_exec(skb, &f->exts, res);
42 }
43
44 static int mall_init(struct tcf_proto *tp)
45 {
46         struct cls_mall_head *head;
47
48         head = kzalloc(sizeof(*head), GFP_KERNEL);
49         if (!head)
50                 return -ENOBUFS;
51
52         rcu_assign_pointer(tp->root, head);
53
54         return 0;
55 }
56
57 static void mall_destroy_filter(struct rcu_head *head)
58 {
59         struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
60
61         tcf_exts_destroy(&f->exts);
62
63         kfree(f);
64 }
65
66 static int mall_replace_hw_filter(struct tcf_proto *tp,
67                                   struct cls_mall_filter *f,
68                                   unsigned long cookie)
69 {
70         struct net_device *dev = tp->q->dev_queue->dev;
71         struct tc_to_netdev offload;
72         struct tc_cls_matchall_offload mall_offload = {0};
73
74         offload.type = TC_SETUP_MATCHALL;
75         offload.cls_mall = &mall_offload;
76         offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
77         offload.cls_mall->exts = &f->exts;
78         offload.cls_mall->cookie = cookie;
79
80         return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
81                                              &offload);
82 }
83
84 static void mall_destroy_hw_filter(struct tcf_proto *tp,
85                                    struct cls_mall_filter *f,
86                                    unsigned long cookie)
87 {
88         struct net_device *dev = tp->q->dev_queue->dev;
89         struct tc_to_netdev offload;
90         struct tc_cls_matchall_offload mall_offload = {0};
91
92         offload.type = TC_SETUP_MATCHALL;
93         offload.cls_mall = &mall_offload;
94         offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
95         offload.cls_mall->exts = NULL;
96         offload.cls_mall->cookie = cookie;
97
98         dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
99                                              &offload);
100 }
101
/* Tear down the classifier instance. When !force and a filter still
 * exists, refuse (return false) so the caller keeps the instance alive.
 * Otherwise remove any hardware offload, defer the filter and head frees
 * past an RCU grace period, and return true.
 */
static bool mall_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_mall_filter *f = head->filter;

	if (!force && f)
		return false;

	if (f) {
		if (tc_should_offload(dev, tp, f->flags))
			mall_destroy_hw_filter(tp, f, (unsigned long) f);

		/* Defer the free: classify may still hold a reference
		 * under the RCU read lock.
		 */
		call_rcu(&f->rcu, mall_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}
121
122 static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
123 {
124         struct cls_mall_head *head = rtnl_dereference(tp->root);
125         struct cls_mall_filter *f = head->filter;
126
127         if (f && f->handle == handle)
128                 return (unsigned long) f;
129         return 0;
130 }
131
132 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
133         [TCA_MATCHALL_UNSPEC]           = { .type = NLA_UNSPEC },
134         [TCA_MATCHALL_CLASSID]          = { .type = NLA_U32 },
135 };
136
137 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
138                           struct cls_mall_filter *f,
139                           unsigned long base, struct nlattr **tb,
140                           struct nlattr *est, bool ovr)
141 {
142         struct tcf_exts e;
143         int err;
144
145         tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
146         err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
147         if (err < 0)
148                 return err;
149
150         if (tb[TCA_MATCHALL_CLASSID]) {
151                 f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
152                 tcf_bind_filter(tp, &f->res, base);
153         }
154
155         tcf_exts_change(tp, &f->exts, &e);
156
157         return 0;
158 }
159
160 static int mall_change(struct net *net, struct sk_buff *in_skb,
161                        struct tcf_proto *tp, unsigned long base,
162                        u32 handle, struct nlattr **tca,
163                        unsigned long *arg, bool ovr)
164 {
165         struct cls_mall_head *head = rtnl_dereference(tp->root);
166         struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
167         struct net_device *dev = tp->q->dev_queue->dev;
168         struct cls_mall_filter *f;
169         struct nlattr *tb[TCA_MATCHALL_MAX + 1];
170         u32 flags = 0;
171         int err;
172
173         if (!tca[TCA_OPTIONS])
174                 return -EINVAL;
175
176         if (head->filter)
177                 return -EBUSY;
178
179         if (fold)
180                 return -EINVAL;
181
182         err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
183                                tca[TCA_OPTIONS], mall_policy);
184         if (err < 0)
185                 return err;
186
187         if (tb[TCA_MATCHALL_FLAGS]) {
188                 flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
189                 if (!tc_flags_valid(flags))
190                         return -EINVAL;
191         }
192
193         f = kzalloc(sizeof(*f), GFP_KERNEL);
194         if (!f)
195                 return -ENOBUFS;
196
197         tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
198
199         if (!handle)
200                 handle = 1;
201         f->handle = handle;
202         f->flags = flags;
203
204         err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
205         if (err)
206                 goto errout;
207
208         if (tc_should_offload(dev, tp, flags)) {
209                 err = mall_replace_hw_filter(tp, f, (unsigned long) f);
210                 if (err) {
211                         if (tc_skip_sw(flags))
212                                 goto errout;
213                         else
214                                 err = 0;
215                 }
216         }
217
218         *arg = (unsigned long) f;
219         rcu_assign_pointer(head->filter, f);
220
221         return 0;
222
223 errout:
224         kfree(f);
225         return err;
226 }
227
/* Delete the filter identified by @arg: remove any hardware offload,
 * unpublish it from the head, drop its class binding, and defer the
 * actual free past an RCU grace period. Always returns 0.
 */
static int mall_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
	struct net_device *dev = tp->q->dev_queue->dev;

	if (tc_should_offload(dev, tp, f->flags))
		mall_destroy_hw_filter(tp, f, (unsigned long) f);

	/* Unpublish first, then defer the free so concurrent classify
	 * under the RCU read lock never sees freed memory.
	 */
	RCU_INIT_POINTER(head->filter, NULL);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, mall_destroy_filter);
	return 0;
}
242
243 static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
244 {
245         struct cls_mall_head *head = rtnl_dereference(tp->root);
246         struct cls_mall_filter *f = head->filter;
247
248         if (arg->count < arg->skip)
249                 goto skip;
250         if (arg->fn(tp, (unsigned long) f, arg) < 0)
251                 arg->stop = 1;
252 skip:
253         arg->count++;
254 }
255
256 static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
257                      struct sk_buff *skb, struct tcmsg *t)
258 {
259         struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
260         struct nlattr *nest;
261
262         if (!f)
263                 return skb->len;
264
265         t->tcm_handle = f->handle;
266
267         nest = nla_nest_start(skb, TCA_OPTIONS);
268         if (!nest)
269                 goto nla_put_failure;
270
271         if (f->res.classid &&
272             nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
273                 goto nla_put_failure;
274
275         if (tcf_exts_dump(skb, &f->exts))
276                 goto nla_put_failure;
277
278         nla_nest_end(skb, nest);
279
280         if (tcf_exts_dump_stats(skb, &f->exts) < 0)
281                 goto nla_put_failure;
282
283         return skb->len;
284
285 nla_put_failure:
286         nla_nest_cancel(skb, nest);
287         return -1;
288 }
289
/* Classifier operations registered with the TC core under "matchall". */
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.owner		= THIS_MODULE,
};
302
/* Module entry: register the matchall classifier with the TC core. */
static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}
307
/* Module exit: unregister the matchall classifier. */
static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}
312
/* Standard module hookup and metadata. */
module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");