drivers/staging/gdm72xx/gdm_qos.c
/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <asm/byteorder.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>

#include "gdm_wimax.h"
#include "hci.h"
#include "gdm_qos.h"

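/*
 * Pool of recycled qos_entry_s structures.  free_qos_entry() keeps up to
 * MAX_FREE_LIST_CNT entries here so alloc_qos_entry() can reuse them
 * without hitting kmalloc() in the transmit path.
 */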
#define MAX_FREE_LIST_CNT               32
static struct {
        struct list_head head;
        int cnt;
        spinlock_t lock;
} qos_free_list;

static void init_qos_entry_list(void)
{
        qos_free_list.cnt = 0;
        INIT_LIST_HEAD(&qos_free_list.head);
        spin_lock_init(&qos_free_list.lock);
}

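/*
 * Take an entry from the free-list cache if one is available; otherwise
 * fall back to an atomic allocation, which may return NULL.
 */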
static void *alloc_qos_entry(void)
{
        struct qos_entry_s *entry;
        unsigned long flags;

        spin_lock_irqsave(&qos_free_list.lock, flags);
        if (qos_free_list.cnt) {
                entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
                                   list);
                list_del(&entry->list);
                qos_free_list.cnt--;
                spin_unlock_irqrestore(&qos_free_list.lock, flags);
                return entry;
        }
        spin_unlock_irqrestore(&qos_free_list.lock, flags);

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        return entry;
}

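/*
 * Return an entry to the free-list cache, or kfree() it once the cache
 * already holds MAX_FREE_LIST_CNT entries.
 */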
static void free_qos_entry(void *entry)
{
        struct qos_entry_s *qentry = (struct qos_entry_s *)entry;
        unsigned long flags;

        spin_lock_irqsave(&qos_free_list.lock, flags);
        if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
                list_add(&qentry->list, &qos_free_list.head);
                qos_free_list.cnt++;
                spin_unlock_irqrestore(&qos_free_list.lock, flags);
                return;
        }
        spin_unlock_irqrestore(&qos_free_list.lock, flags);

        kfree(entry);
}

static void free_qos_entry_list(struct list_head *free_list)
{
        struct qos_entry_s *entry, *n;
        int total_free = 0;

        list_for_each_entry_safe(entry, n, free_list, list) {
                list_del(&entry->list);
                kfree(entry);
                total_free++;
        }

        pr_debug("%s: total_free_cnt=%d\n", __func__, total_free);
}

void gdm_qos_init(void *nic_ptr)
{
        struct nic *nic = nic_ptr;
        struct qos_cb_s *qcb = &nic->qos;
        int i;

        for (i = 0; i < QOS_MAX; i++) {
                INIT_LIST_HEAD(&qcb->qos_list[i]);
                qcb->csr[i].qos_buf_count = 0;
                qcb->csr[i].enabled = false;
        }

        qcb->qos_list_cnt = 0;
        qcb->qos_null_idx = QOS_MAX-1;
        qcb->qos_limit_size = 255;

        spin_lock_init(&qcb->qos_lock);

        init_qos_entry_list();
}

void gdm_qos_release_list(void *nic_ptr)
{
        struct nic *nic = nic_ptr;
        struct qos_cb_s *qcb = &nic->qos;
        unsigned long flags;
        struct qos_entry_s *entry, *n;
        struct list_head free_list;
        int i;

        INIT_LIST_HEAD(&free_list);

        spin_lock_irqsave(&qcb->qos_lock, flags);

        for (i = 0; i < QOS_MAX; i++) {
                qcb->csr[i].qos_buf_count = 0;
                qcb->csr[i].enabled = false;
        }

        qcb->qos_list_cnt = 0;
        qcb->qos_null_idx = QOS_MAX-1;

        for (i = 0; i < QOS_MAX; i++) {
                list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) {
                        list_move_tail(&entry->list, &free_list);
                }
        }
        spin_unlock_irqrestore(&qcb->qos_lock, flags);
        free_qos_entry_list(&free_list);
}

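/*
 * Match an IPv4 header (stream) and the transport-header port bytes (port)
 * against one classification rule.  Returns 0 on a match and 1 as soon as
 * any enabled field of the rule rejects the packet.
 */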
static int chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port)
{
        int i;

        if (csr->classifier_rule_en&IPTYPEOFSERVICE) {
                if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
                    ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
                        return 1;
        }

        if (csr->classifier_rule_en&PROTOCOL) {
                if (stream[9] != csr->protocol)
                        return 1;
        }

        if (csr->classifier_rule_en&IPMASKEDSRCADDRESS) {
                for (i = 0; i < 4; i++) {
                        if ((stream[12 + i] & csr->ipsrc_addrmask[i]) !=
                        (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
                                return 1;
                }
        }

        if (csr->classifier_rule_en&IPMASKEDDSTADDRESS) {
                for (i = 0; i < 4; i++) {
                        if ((stream[16 + i] & csr->ipdst_addrmask[i]) !=
                        (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
                                return 1;
                }
        }

        if (csr->classifier_rule_en&PROTOCOLSRCPORTRANGE) {
                i = ((port[0]<<8)&0xff00)+port[1];
                if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
                        return 1;
        }

        if (csr->classifier_rule_en&PROTOCOLDSTPORTRANGE) {
                i = ((port[2]<<8)&0xff00)+port[3];
                if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
                        return 1;
        }

        return 0;
}

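/*
 * Find the first enabled CSR whose classification rule matches this IPv4
 * packet.  Returns the CSR index, or -1 if the packet is not IPv4 or no
 * rule matches.
 */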
static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
{
        int ip_ver, i;
        struct qos_cb_s *qcb = &nic->qos;

        if (iph == NULL || tcpudph == NULL)
                return -1;

        ip_ver = (iph[0]>>4)&0xf;

        if (ip_ver != 4)
                return -1;

        for (i = 0; i < QOS_MAX; i++) {
                if (!qcb->csr[i].enabled)
                        continue;
                if (!qcb->csr[i].classifier_rule_en)
                        continue;
                if (chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
                        return i;
        }

        return -1;
}

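/*
 * Move at most one queued entry from each enabled CSR (while its buffer
 * budget allows) onto a private list for transmission.  Called with
 * qcb->qos_lock held.
 */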
static void extract_qos_list(struct nic *nic, struct list_head *head)
{
        struct qos_cb_s *qcb = &nic->qos;
        struct qos_entry_s *entry;
        int i;

        INIT_LIST_HEAD(head);

        for (i = 0; i < QOS_MAX; i++) {
                if (!qcb->csr[i].enabled)
                        continue;
                if (qcb->csr[i].qos_buf_count >= qcb->qos_limit_size)
                        continue;
                if (list_empty(&qcb->qos_list[i]))
                        continue;

                entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s,
                                   list);

                list_move_tail(&entry->list, head);
                qcb->csr[i].qos_buf_count++;

                if (!list_empty(&qcb->qos_list[i]))
                        netdev_warn(nic->netdev, "Index(%d) is piled!!\n", i);
        }
}

static void send_qos_list(struct nic *nic, struct list_head *head)
{
        struct qos_entry_s *entry, *n;

        list_for_each_entry_safe(entry, n, head, list) {
                list_del(&entry->list);
                gdm_wimax_send_tx(entry->skb, entry->dev);
                free_qos_entry(entry);
        }
}

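/*
 * Transmit path.  While QoS flows are active, IPv4 frames are queued on
 * the per-CSR lists and drained through extract_qos_list() and
 * send_qos_list(); all other frames go straight to gdm_wimax_send_tx().
 */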
int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
{
        struct nic *nic = netdev_priv(dev);
        int index;
        struct qos_cb_s *qcb = &nic->qos;
        unsigned long flags;
        struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
        struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
        struct tcphdr *tcph;
        struct qos_entry_s *entry = NULL;
        struct list_head send_list;
        int ret = 0;

        /* The transport header starts iph->ihl 32-bit words after iph. */
        tcph = (struct tcphdr *)((char *)iph + iph->ihl*4);

        if (ethh->h_proto == cpu_to_be16(ETH_P_IP)) {
                if (qcb->qos_list_cnt && !qos_free_list.cnt) {
                        entry = alloc_qos_entry();
                        entry->skb = skb;
                        entry->dev = dev;
                        netdev_dbg(dev, "qcb->qos_list_cnt=%d\n",
                                   qcb->qos_list_cnt);
                }

                spin_lock_irqsave(&qcb->qos_lock, flags);
                if (qcb->qos_list_cnt) {
                        index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
                        if (index == -1)
                                index = qcb->qos_null_idx;

                        if (!entry) {
                                entry = alloc_qos_entry();
                                entry->skb = skb;
                                entry->dev = dev;
                        }

                        list_add_tail(&entry->list, &qcb->qos_list[index]);
                        extract_qos_list(nic, &send_list);
                        spin_unlock_irqrestore(&qcb->qos_lock, flags);
                        send_qos_list(nic, &send_list);
                        goto out;
                }
                spin_unlock_irqrestore(&qcb->qos_lock, flags);
                if (entry)
                        free_qos_entry(entry);
        }

        ret = gdm_wimax_send_tx(skb, dev);
out:
        return ret;
}

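/*
 * Look up the CSR slot for a service flow ID.  If mode is non-zero and the
 * SFID is unknown, claim the first disabled slot.  Returns the slot index,
 * or -1 if none is found.
 */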
static int get_csr(struct qos_cb_s *qcb, u32 sfid, int mode)
{
        int i;

        for (i = 0; i < qcb->qos_list_cnt; i++) {
                if (qcb->csr[i].sfid == sfid)
                        return i;
        }

        if (mode) {
                for (i = 0; i < QOS_MAX; i++) {
                        if (!qcb->csr[i].enabled) {
                                qcb->csr[i].enabled = true;
                                qcb->qos_list_cnt++;
                                return i;
                        }
                }
        }
        return -1;
}

#define QOS_CHANGE_DEL  0xFC
#define QOS_ADD         0xFD
#define QOS_REPORT      0xFE

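/*
 * Handle a QoS HCI event from the device: QOS_REPORT refreshes the
 * per-flow buffer counts and flushes queued packets, QOS_ADD installs a
 * classification rule, and QOS_CHANGE_DEL tears one down.
 */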
void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
{
        struct nic *nic = nic_ptr;
        int i, index, pos;
        u32 sfid;
        u8 sub_cmd_evt;
        struct qos_cb_s *qcb = &nic->qos;
        struct qos_entry_s *entry, *n;
        struct list_head send_list;
        struct list_head free_list;
        unsigned long flags;

        sub_cmd_evt = (u8)buf[4];

        if (sub_cmd_evt == QOS_REPORT) {
                spin_lock_irqsave(&qcb->qos_lock, flags);
                for (i = 0; i < qcb->qos_list_cnt; i++) {
                        sfid = ((buf[(i*5)+6]<<24)&0xff000000);
                        sfid += ((buf[(i*5)+7]<<16)&0xff0000);
                        sfid += ((buf[(i*5)+8]<<8)&0xff00);
                        sfid += (buf[(i*5)+9]);
                        index = get_csr(qcb, sfid, 0);
                        if (index == -1) {
                                spin_unlock_irqrestore(&qcb->qos_lock, flags);
                                netdev_err(nic->netdev, "QoS ERROR: No SF\n");
                                return;
                        }
                        qcb->csr[index].qos_buf_count = buf[(i*5)+10];
                }

                extract_qos_list(nic, &send_list);
                spin_unlock_irqrestore(&qcb->qos_lock, flags);
                send_qos_list(nic, &send_list);
                return;
        }

        /* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANGE_DEL */
        pos = 6;
        sfid = ((buf[pos++]<<24)&0xff000000);
        sfid += ((buf[pos++]<<16)&0xff0000);
        sfid += ((buf[pos++]<<8)&0xff00);
        sfid += (buf[pos++]);

        index = get_csr(qcb, sfid, 1);
        if (index == -1) {
                netdev_err(nic->netdev,
                           "QoS ERROR: csr Update Error / Wrong index (%d)\n",
                           index);
                return;
        }

        if (sub_cmd_evt == QOS_ADD) {
                netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n",
                           sfid, index);

                spin_lock_irqsave(&qcb->qos_lock, flags);
                qcb->csr[index].sfid = sfid;
                qcb->csr[index].classifier_rule_en = ((buf[pos++]<<8)&0xff00);
                qcb->csr[index].classifier_rule_en += buf[pos++];
                if (qcb->csr[index].classifier_rule_en == 0)
                        qcb->qos_null_idx = index;
                qcb->csr[index].ip2s_mask = buf[pos++];
                qcb->csr[index].ip2s_lo = buf[pos++];
                qcb->csr[index].ip2s_hi = buf[pos++];
                qcb->csr[index].protocol = buf[pos++];
                qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
                qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
                qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
                qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
                qcb->csr[index].ipsrc_addr[0] = buf[pos++];
                qcb->csr[index].ipsrc_addr[1] = buf[pos++];
                qcb->csr[index].ipsrc_addr[2] = buf[pos++];
                qcb->csr[index].ipsrc_addr[3] = buf[pos++];
                qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
                qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
                qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
                qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
                qcb->csr[index].ipdst_addr[0] = buf[pos++];
                qcb->csr[index].ipdst_addr[1] = buf[pos++];
                qcb->csr[index].ipdst_addr[2] = buf[pos++];
                qcb->csr[index].ipdst_addr[3] = buf[pos++];
                qcb->csr[index].srcport_lo = ((buf[pos++]<<8)&0xff00);
                qcb->csr[index].srcport_lo += buf[pos++];
                qcb->csr[index].srcport_hi = ((buf[pos++]<<8)&0xff00);
                qcb->csr[index].srcport_hi += buf[pos++];
                qcb->csr[index].dstport_lo = ((buf[pos++]<<8)&0xff00);
                qcb->csr[index].dstport_lo += buf[pos++];
                qcb->csr[index].dstport_hi = ((buf[pos++]<<8)&0xff00);
                qcb->csr[index].dstport_hi += buf[pos++];

                qcb->qos_limit_size = 254/qcb->qos_list_cnt;
                spin_unlock_irqrestore(&qcb->qos_lock, flags);
        } else if (sub_cmd_evt == QOS_CHANGE_DEL) {
                netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
                           sfid, index);

                INIT_LIST_HEAD(&free_list);

                spin_lock_irqsave(&qcb->qos_lock, flags);
                qcb->csr[index].enabled = false;
                qcb->qos_list_cnt--;
                /* Avoid dividing by zero when the last flow is removed. */
                if (qcb->qos_list_cnt)
                        qcb->qos_limit_size = 254/qcb->qos_list_cnt;

                list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
                                         list) {
                        list_move_tail(&entry->list, &free_list);
                }
                spin_unlock_irqrestore(&qcb->qos_lock, flags);
                free_qos_entry_list(&free_list);
        }
}