/*
 * Fair Queue CoDel discipline
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/*      Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified on flows (by the internal classifier or an
 * external one).
 * This is a stochastic model: as we use a hash, several flows might be
 * hashed onto the same slot.
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists, so that new flows have
 * priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * drops happen at the head only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
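
/* Illustrative configuration (eth0 is a hypothetical device; the long
 * options map to the TCA_FQ_CODEL_* attributes parsed below):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *      target 5ms interval 100ms ecn
 *
 * Times cross netlink in usec and are converted to codel_time_t in
 * fq_codel_change().
 */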

struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        u32               dropped; /* number of drops (or ECN marks) on this flow */
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
        u32             perturbation;   /* hash perturbation */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32             drop_batch_size;
        u32             memory_limit;
        struct codel_params cparams;
        struct codel_stats cstats;
        u32             memory_usage;
        u32             drop_overmemory;
        u32             drop_overlimit;
        u32             new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  struct sk_buff *skb)
{
        u32 hash = skb_get_hash_perturb(skb, q->perturbation);

        return reciprocal_scale(hash, q->flows_cnt);
}
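
/* reciprocal_scale() maps the 32bit hash onto [0, flows_cnt) without a
 * modulus: (u32)(((u64)hash * flows_cnt) >> 32). For example, with the
 * default 1024 flows, hash 0x80000000 lands in slot 512.
 */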

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        filter = rcu_dereference_bh(q->filter_list);
        if (!filter)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, filter, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
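                        /* fall through */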
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}
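
/* Return convention: 0 means drop (with *qerr saying how to account for
 * it); 1..flows_cnt selects a flow, and the caller subtracts one to get
 * the flow table index.
 */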

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb->next = NULL;
        return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
                                  struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;
        unsigned int threshold;
        unsigned int mem = 0;

        /* Queue is full! Find the fat flow and drop packet(s) from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we don't need to handle a complex tree
         * in the fast path (packet enqueue/dequeue) with many cache misses.
         * In stress mode, we'll try to drop 64 packets from the flow,
         * amortizing this linear lookup to one cache line per drop.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }

        /* Our goal is to drop half of this fat flow backlog */
        threshold = maxbacklog >> 1;

        flow = &q->flows[idx];
        len = 0;
        i = 0;
        do {
                skb = dequeue_head(flow);
                len += qdisc_pkt_len(skb);
                mem += get_codel_cb(skb)->mem_usage;
                __qdisc_drop(skb, to_free);
        } while (++i < max_packets && len < threshold);

        flow->dropped += i;
        q->backlogs[idx] -= len;
        q->memory_usage -= mem;
        sch->qstats.drops += i;
        sch->qstats.backlog -= len;
        sch->q.qlen -= i;
        return idx;
}
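
/* Worked example: if the fattest flow holds 100 packets of 1500 bytes
 * (backlog 150000), threshold is 75000, so a single call drops 50
 * packets, comfortably under the default 64-packet batch limit.
 */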

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                            struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
        unsigned int pkt_len;
        bool memory_limited;

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
        get_codel_cb(skb)->mem_usage = skb->truesize;
        q->memory_usage += get_codel_cb(skb)->mem_usage;
        memory_limited = q->memory_usage > q->memory_limit;
        if (++sch->q.qlen <= sch->limit && !memory_limited)
                return NET_XMIT_SUCCESS;

        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;

        /* save this packet length as it might be dropped by fq_codel_drop() */
        pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog,
         * with a 64 packet batch limit so we don't add too big a cpu spike.
         */
        ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

        prev_qlen -= sch->q.qlen;
        prev_backlog -= sch->qstats.backlog;
        q->drop_overlimit += prev_qlen;
        if (memory_limited)
                q->drop_overmemory += prev_qlen;

        /* As we dropped packet(s), better let the upper stack know this.
         * If we dropped a packet for this flow, return NET_XMIT_CN,
         * but in this case, our parents won't increase their backlogs.
         */
        if (ret == idx) {
                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
                                          prev_backlog - pkt_len);
                return NET_XMIT_CN;
        }
        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
        return NET_XMIT_SUCCESS;
}
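
/* Note on the accounting above: when the culled flow is the one we just
 * enqueued to (ret == idx), our own packet may be among those dropped,
 * so one packet (and pkt_len bytes) is excluded from the backlog
 * reduction reported to parent qdiscs.
 */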

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
        struct Qdisc *sch = ctx;
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                q->memory_usage -= get_codel_cb(skb)->mem_usage;
                sch->q.qlen--;
                sch->qstats.backlog -= qdisc_pkt_len(skb);
        }
        return skb;
}
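
/* codel_dequeue() only ever sees the embedded codel_vars pointer; the
 * flow is recovered via container_of(), and its index in the backlog
 * table by pointer arithmetic (flow - q->flows).
 */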

static void drop_func(struct sk_buff *skb, void *ctx)
{
        struct Qdisc *sch = ctx;

        kfree_skb(skb);
        qdisc_qstats_drop(sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;
        unsigned int prev_backlog;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;
        prev_backlog = sch->qstats.backlog;

        skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
                            &flow->cvars, &q->cstats, qdisc_pkt_len,
                            codel_get_enqueue_time, drop_func, dequeue_func);

        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
                                          q->cstats.drop_len);
                q->cstats.drop_count = 0;
                q->cstats.drop_len = 0;
        }
        return skb;
}
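
/* Deficit Round Robin in brief: a flow may dequeue while its deficit is
 * positive, and each packet sent debits its length. With the default
 * quantum of one MTU, a flow sends roughly one full-size packet per
 * round before rotating to the tail of old_flows, which is what gives
 * new (thin) flows their latency advantage.
 */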

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        for (i = 0; i < q->flows_cnt; i++) {
                struct fq_codel_flow *flow = q->flows + i;

                fq_codel_flow_purge(flow);
                INIT_LIST_HEAD(&flow->flowchain);
                codel_vars_init(&flow->cvars);
        }
        memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
        q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]   = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]  = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        sch_tree_lock(sch);

        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
                u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

                q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (tb[TCA_FQ_CODEL_QUANTUM])
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

        if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
                q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

        if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
                q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

        while (sch->q.qlen > sch->limit ||
               q->memory_usage > q->memory_limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                q->cstats.drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
        q->cstats.drop_count = 0;
        q->cstats.drop_len = 0;

        sch_tree_unlock(sch);
        return 0;
}
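
/* Unit bookkeeping, for reference: netlink carries times in usec, while
 * codel_time_t ticks in units of 1 << CODEL_SHIFT ns (1024 ns). So a
 * 5000 usec target becomes (5000 * 1000) >> 10 = 4882 internal units.
 */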

static void *fq_codel_zalloc(size_t sz)
{
        void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

        if (!ptr)
                ptr = vzalloc(sz);
        return ptr;
}
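
/* Design note: with up to 65536 flows of 64 bytes each, the flows table
 * can reach 4MB; kzalloc() may fail on fragmented memory, so we fall
 * back to vzalloc(), and kvfree() below frees either kind.
 */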

static void fq_codel_free(void *addr)
{
        kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_destroy_chain(&q->filter_list);
        fq_codel_free(q->backlogs);
        fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->memory_limit = 32 << 20; /* 32 MBytes */
        q->drop_batch_size = 64;
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->perturbation = prandom_u32();
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));

        if (opt) {
                int err = fq_codel_change(sch, opt);

                if (err)
                        return err;
        }

        if (!q->flows) {
                q->flows = fq_codel_zalloc(q->flows_cnt *
                                           sizeof(struct fq_codel_flow));
                if (!q->flows)
                        return -ENOMEM;
                q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
                if (!q->backlogs) {
                        fq_codel_free(q->flows);
                        return -ENOMEM;
                }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                        codel_vars_init(&flow->cvars);
                }
        }
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}
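
/* Defaults above, in human units: limit 10240 packets, 1024 flows, 32MB
 * memory limit, drop batch 64, quantum = device MTU, ECN enabled;
 * codel_params_init() supplies the usual 5ms target / 100ms interval.
 */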

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
                        q->drop_batch_size) ||
            nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
                        q->memory_limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
            nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
                        codel_time_to_us(q->cparams.ce_threshold)))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type                           = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
        st.qdisc_stats.ce_mark = q->cstats.ce_mark;
        st.qdisc_stats.memory_usage = q->memory_usage;
        st.qdisc_stats.drop_overmemory = q->drop_overmemory;

        sch_tree_lock(sch);
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                                   u32 classid)
{
        /* we cannot bypass queue discipline anymore */
        sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
                                                  unsigned long cl)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                               struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                if (flow->head) {
                        sch_tree_lock(sch);
                        skb = flow->head;
                        while (skb) {
                                qs.qlen++;
                                skb = skb->next;
                        }
                        sch_tree_unlock(sch);
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain) ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           =       fq_codel_leaf,
        .get            =       fq_codel_get,
        .put            =       fq_codel_put,
        .tcf_chain      =       fq_codel_find_tcf,
        .bind_tcf       =       fq_codel_bind,
        .unbind_tcf     =       fq_codel_put,
        .dump           =       fq_codel_dump_class,
        .dump_stats     =       fq_codel_dump_class_stats,
        .walk           =       fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         =       &fq_codel_class_ops,
        .id             =       "fq_codel",
        .priv_size      =       sizeof(struct fq_codel_sched_data),
        .enqueue        =       fq_codel_enqueue,
        .dequeue        =       fq_codel_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_codel_init,
        .reset          =       fq_codel_reset,
        .destroy        =       fq_codel_destroy,
        .change         =       fq_codel_change,
        .dump           =       fq_codel_dump,
        .dump_stats     =       fq_codel_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");