include/net/fq_impl.h
/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into the including file */

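/*
 * Overview (summarising the code below): each tin runs a deficit
 * round-robin scheduler over two lists, new_flows and old_flows, as in
 * fq_codel.  Packets are hashed into a shared array of flows, and every
 * backlogged flow is additionally kept on fq->backlogs, sorted by
 * descending backlog, so the fattest flow can be found quickly when the
 * queue overflows.  The enqueue/dequeue paths assert that fq->lock is
 * held by the caller.
 */
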
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
                                       struct fq_flow *flow)
{
        struct fq_tin *tin = flow->tin;
        struct fq_flow *i;
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

        skb = __skb_dequeue(&flow->queue);
        if (!skb)
                return NULL;

        tin->backlog_bytes -= skb->len;
        tin->backlog_packets--;
        flow->backlog -= skb->len;
        fq->backlog--;
        fq->memory_usage -= skb->truesize;

        if (flow->backlog == 0) {
                list_del_init(&flow->backlogchain);
        } else {
                i = flow;

                list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
                        if (i->backlog < flow->backlog)
                                break;

                list_move_tail(&flow->backlogchain,
                               &i->backlogchain);
        }

        return skb;
}
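
/*
 * Note on the list juggling above: after the flow's backlog shrinks it
 * may be out of place in fq->backlogs, so it is walked forward from its
 * current position and re-inserted before the first entry with a
 * smaller backlog, preserving the descending sort order.
 */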

static struct sk_buff *fq_tin_dequeue(struct fq *fq,
                                      struct fq_tin *tin,
                                      fq_tin_dequeue_t dequeue_func)
{
        struct fq_flow *flow;
        struct list_head *head;
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

begin:
        head = &tin->new_flows;
        if (list_empty(head)) {
                head = &tin->old_flows;
                if (list_empty(head))
                        return NULL;
        }

        flow = list_first_entry(head, struct fq_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += fq->quantum;
                list_move_tail(&flow->flowchain,
                               &tin->old_flows);
                goto begin;
        }

        skb = dequeue_func(fq, tin, flow);
        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &tin->new_flows) &&
                    !list_empty(&tin->old_flows)) {
                        list_move_tail(&flow->flowchain, &tin->old_flows);
                } else {
                        list_del_init(&flow->flowchain);
                        flow->tin = NULL;
                }
                goto begin;
        }

        flow->deficit -= skb->len;
        tin->tx_bytes += skb->len;
        tin->tx_packets++;

        return skb;
}
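
/*
 * Example (illustrative only, not part of this header): the simplest
 * possible dequeue_func just pops the head skb; an includer such as
 * mac80211 hooks its AQM (codel) in here instead.  The helper name is
 * hypothetical.
 *
 *	static struct sk_buff *example_dequeue(struct fq *fq,
 *					       struct fq_tin *tin,
 *					       struct fq_flow *flow)
 *	{
 *		return fq_flow_dequeue(fq, flow);
 *	}
 *
 *	skb = fq_tin_dequeue(fq, tin, example_dequeue);
 */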

static struct fq_flow *fq_flow_classify(struct fq *fq,
                                        struct fq_tin *tin,
                                        struct sk_buff *skb,
                                        fq_flow_get_default_t get_default_func)
{
        struct fq_flow *flow;
        u32 hash;
        u32 idx;

        lockdep_assert_held(&fq->lock);

        hash = skb_get_hash_perturb(skb, fq->perturbation);
        idx = reciprocal_scale(hash, fq->flows_cnt);
        flow = &fq->flows[idx];

        if (flow->tin && flow->tin != tin) {
                flow = get_default_func(fq, tin, idx, skb);
                tin->collisions++;
                fq->collisions++;
        }

        if (!flow->tin)
                tin->flows++;

        return flow;
}
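
/*
 * Example (illustrative only): when the hashed slot is already owned by
 * another tin, get_default_func must hand back a fallback flow that
 * belongs to this tin; mac80211, for instance, keeps one default flow
 * per txq.  The names below are hypothetical.
 *
 *	static struct fq_flow *example_get_default(struct fq *fq,
 *						   struct fq_tin *tin,
 *						   int idx,
 *						   struct sk_buff *skb)
 *	{
 *		struct example_txq *txq =
 *			container_of(tin, struct example_txq, tin);
 *
 *		return &txq->default_flow;
 *	}
 */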

static void fq_recalc_backlog(struct fq *fq,
                              struct fq_tin *tin,
                              struct fq_flow *flow)
{
        struct fq_flow *i;

        if (list_empty(&flow->backlogchain))
                list_add_tail(&flow->backlogchain, &fq->backlogs);

        i = flow;
        list_for_each_entry_continue_reverse(i, &fq->backlogs,
                                             backlogchain)
                if (i->backlog > flow->backlog)
                        break;

        list_move(&flow->backlogchain, &i->backlogchain);
}
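
/*
 * fq_recalc_backlog() is the enqueue-side counterpart of the re-sort in
 * fq_flow_dequeue(): the flow's backlog has just grown, so it is walked
 * backwards (towards the head) and re-inserted after the first entry
 * with a larger backlog, keeping fq->backlogs sorted by descending
 * backlog for the overflow path in fq_tin_enqueue().
 */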

static void fq_tin_enqueue(struct fq *fq,
                           struct fq_tin *tin,
                           struct sk_buff *skb,
                           fq_skb_free_t free_func,
                           fq_flow_get_default_t get_default_func)
{
        struct fq_flow *flow;

        lockdep_assert_held(&fq->lock);

        flow = fq_flow_classify(fq, tin, skb, get_default_func);

        flow->tin = tin;
        flow->backlog += skb->len;
        tin->backlog_bytes += skb->len;
        tin->backlog_packets++;
        fq->memory_usage += skb->truesize;
        fq->backlog++;

        fq_recalc_backlog(fq, tin, flow);

        if (list_empty(&flow->flowchain)) {
                flow->deficit = fq->quantum;
                list_add_tail(&flow->flowchain,
                              &tin->new_flows);
        }

        __skb_queue_tail(&flow->queue, skb);

        if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
                flow = list_first_entry_or_null(&fq->backlogs,
                                                struct fq_flow,
                                                backlogchain);
                if (!flow)
                        return;

                skb = fq_flow_dequeue(fq, flow);
                if (!skb)
                        return;

                free_func(fq, flow->tin, flow, skb);

                flow->tin->overlimit++;
                fq->overlimit++;
                if (fq->memory_usage > fq->memory_limit)
                        fq->overmemory++;
        }
}
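
/*
 * Example (illustrative only): on overflow the head of fq->backlogs,
 * i.e. the flow with the largest backlog, is trimmed by one packet and
 * the skb is handed to free_func.  A minimal callback just frees it;
 * the helper name is hypothetical.
 *
 *	static void example_free(struct fq *fq, struct fq_tin *tin,
 *				 struct fq_flow *flow, struct sk_buff *skb)
 *	{
 *		dev_kfree_skb_any(skb);
 *	}
 */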

static void fq_flow_reset(struct fq *fq,
                          struct fq_flow *flow,
                          fq_skb_free_t free_func)
{
        struct sk_buff *skb;

        while ((skb = fq_flow_dequeue(fq, flow)))
                free_func(fq, flow->tin, flow, skb);

        if (!list_empty(&flow->flowchain))
                list_del_init(&flow->flowchain);

        if (!list_empty(&flow->backlogchain))
                list_del_init(&flow->backlogchain);

        flow->tin = NULL;

        WARN_ON_ONCE(flow->backlog);
}

static void fq_tin_reset(struct fq *fq,
                         struct fq_tin *tin,
                         fq_skb_free_t free_func)
{
        struct list_head *head;
        struct fq_flow *flow;

        for (;;) {
                head = &tin->new_flows;
                if (list_empty(head)) {
                        head = &tin->old_flows;
                        if (list_empty(head))
                                break;
                }

                flow = list_first_entry(head, struct fq_flow, flowchain);
                fq_flow_reset(fq, flow, free_func);
        }

        WARN_ON_ONCE(tin->backlog_bytes);
        WARN_ON_ONCE(tin->backlog_packets);
}
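
/*
 * Note: fq_flow_reset() and fq_tin_reset() drain every queued skb
 * through free_func and detach the flows from both the DRR lists and
 * fq->backlogs; since they go through fq_flow_dequeue(), the caller
 * must hold fq->lock here as well.
 */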

static void fq_flow_init(struct fq_flow *flow)
{
        INIT_LIST_HEAD(&flow->flowchain);
        INIT_LIST_HEAD(&flow->backlogchain);
        __skb_queue_head_init(&flow->queue);
}

static void fq_tin_init(struct fq_tin *tin)
{
        INIT_LIST_HEAD(&tin->new_flows);
        INIT_LIST_HEAD(&tin->old_flows);
}

static int fq_init(struct fq *fq, int flows_cnt)
{
        int i;

        memset(fq, 0, sizeof(fq[0]));
        INIT_LIST_HEAD(&fq->backlogs);
        spin_lock_init(&fq->lock);
        fq->flows_cnt = max_t(u32, flows_cnt, 1);
        fq->perturbation = prandom_u32();
        fq->quantum = 300;
        fq->limit = 8192;
        fq->memory_limit = 16 << 20; /* 16 MBytes */

        fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
        if (!fq->flows)
                return -ENOMEM;

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_init(&fq->flows[i]);

        return 0;
}

static void fq_reset(struct fq *fq,
                     fq_skb_free_t free_func)
{
        int i;

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_reset(fq, &fq->flows[i], free_func);

        kfree(fq->flows);
        fq->flows = NULL;
}
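
/*
 * Example (illustrative only): a minimal sketch of the lifecycle an
 * includer follows, reusing the hypothetical callbacks from the
 * comments above; declarations of skb etc. are omitted.  fq_init()
 * picks the defaults seen above (quantum 300 bytes, 8192-packet limit,
 * 16 MB memory cap), which the caller may override after it returns.
 *
 *	struct fq fq;
 *	struct fq_tin tin;
 *
 *	if (fq_init(&fq, 1024))
 *		return -ENOMEM;
 *	fq_tin_init(&tin);
 *
 *	spin_lock_bh(&fq.lock);
 *	fq_tin_enqueue(&fq, &tin, skb, example_free, example_get_default);
 *	skb = fq_tin_dequeue(&fq, &tin, example_dequeue);
 *	spin_unlock_bh(&fq.lock);
 *
 *	spin_lock_bh(&fq.lock);
 *	fq_tin_reset(&fq, &tin, example_free);
 *	fq_reset(&fq, example_free);
 *	spin_unlock_bh(&fq.lock);
 */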

#endif