/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/etherdevice.h>
17 #include <asm/byteorder.h>
20 #include <linux/tcp.h>
21 #include <linux/if_ether.h>
23 #include "gdm_wimax.h"
27 #define MAX_FREE_LIST_CNT 32
29 struct list_head head;
34 static void init_qos_entry_list(void)
36 qos_free_list.cnt = 0;
37 INIT_LIST_HEAD(&qos_free_list.head);
38 spin_lock_init(&qos_free_list.lock);
41 static void *alloc_qos_entry(void)
43 struct qos_entry_s *entry;
46 spin_lock_irqsave(&qos_free_list.lock, flags);
47 if (qos_free_list.cnt) {
48 entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
50 list_del(&entry->list);
52 spin_unlock_irqrestore(&qos_free_list.lock, flags);
55 spin_unlock_irqrestore(&qos_free_list.lock, flags);
57 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
61 static void free_qos_entry(void *entry)
63 struct qos_entry_s *qentry = (struct qos_entry_s *)entry;
66 spin_lock_irqsave(&qos_free_list.lock, flags);
67 if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
68 list_add(&qentry->list, &qos_free_list.head);
70 spin_unlock_irqrestore(&qos_free_list.lock, flags);
73 spin_unlock_irqrestore(&qos_free_list.lock, flags);
78 static void free_qos_entry_list(struct list_head *free_list)
80 struct qos_entry_s *entry, *n;
83 list_for_each_entry_safe(entry, n, free_list, list) {
84 list_del(&entry->list);
89 pr_debug("%s: total_free_cnt=%d\n", __func__, total_free);
92 void gdm_qos_init(void *nic_ptr)
94 struct nic *nic = nic_ptr;
95 struct qos_cb_s *qcb = &nic->qos;
98 for (i = 0; i < QOS_MAX; i++) {
99 INIT_LIST_HEAD(&qcb->qos_list[i]);
100 qcb->csr[i].qos_buf_count = 0;
101 qcb->csr[i].enabled = false;
104 qcb->qos_list_cnt = 0;
105 qcb->qos_null_idx = QOS_MAX-1;
106 qcb->qos_limit_size = 255;
108 spin_lock_init(&qcb->qos_lock);
110 init_qos_entry_list();
113 void gdm_qos_release_list(void *nic_ptr)
115 struct nic *nic = nic_ptr;
116 struct qos_cb_s *qcb = &nic->qos;
118 struct qos_entry_s *entry, *n;
119 struct list_head free_list;
122 INIT_LIST_HEAD(&free_list);
124 spin_lock_irqsave(&qcb->qos_lock, flags);
126 for (i = 0; i < QOS_MAX; i++) {
127 qcb->csr[i].qos_buf_count = 0;
128 qcb->csr[i].enabled = false;
131 qcb->qos_list_cnt = 0;
132 qcb->qos_null_idx = QOS_MAX-1;
134 for (i = 0; i < QOS_MAX; i++) {
135 list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) {
136 list_move_tail(&entry->list, &free_list);
139 spin_unlock_irqrestore(&qcb->qos_lock, flags);
140 free_qos_entry_list(&free_list);
143 static int chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port)
147 if (csr->classifier_rule_en&IPTYPEOFSERVICE) {
148 if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
149 ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
153 if (csr->classifier_rule_en&PROTOCOL) {
154 if (stream[9] != csr->protocol)
158 if (csr->classifier_rule_en&IPMASKEDSRCADDRESS) {
159 for (i = 0; i < 4; i++) {
160 if ((stream[12 + i] & csr->ipsrc_addrmask[i]) !=
161 (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
166 if (csr->classifier_rule_en&IPMASKEDDSTADDRESS) {
167 for (i = 0; i < 4; i++) {
168 if ((stream[16 + i] & csr->ipdst_addrmask[i]) !=
169 (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
174 if (csr->classifier_rule_en&PROTOCOLSRCPORTRANGE) {
175 i = ((port[0]<<8)&0xff00)+port[1];
176 if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
180 if (csr->classifier_rule_en&PROTOCOLDSTPORTRANGE) {
181 i = ((port[2]<<8)&0xff00)+port[3];
182 if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
189 static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
192 struct qos_cb_s *qcb = &nic->qos;
194 if (iph == NULL || tcpudph == NULL)
197 ip_ver = (iph[0]>>4)&0xf;
202 for (i = 0; i < QOS_MAX; i++) {
203 if (!qcb->csr[i].enabled)
205 if (!qcb->csr[i].classifier_rule_en)
207 if (chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
214 static void extract_qos_list(struct nic *nic, struct list_head *head)
216 struct qos_cb_s *qcb = &nic->qos;
217 struct qos_entry_s *entry;
220 INIT_LIST_HEAD(head);
222 for (i = 0; i < QOS_MAX; i++) {
223 if (!qcb->csr[i].enabled)
225 if (qcb->csr[i].qos_buf_count >= qcb->qos_limit_size)
227 if (list_empty(&qcb->qos_list[i]))
230 entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s,
233 list_move_tail(&entry->list, head);
234 qcb->csr[i].qos_buf_count++;
236 if (!list_empty(&qcb->qos_list[i]))
237 netdev_warn(nic->netdev, "Index(%d) is piled!!\n", i);
241 static void send_qos_list(struct nic *nic, struct list_head *head)
243 struct qos_entry_s *entry, *n;
245 list_for_each_entry_safe(entry, n, head, list) {
246 list_del(&entry->list);
247 gdm_wimax_send_tx(entry->skb, entry->dev);
248 free_qos_entry(entry);
252 int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
254 struct nic *nic = netdev_priv(dev);
256 struct qos_cb_s *qcb = &nic->qos;
258 struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
259 struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
261 struct qos_entry_s *entry = NULL;
262 struct list_head send_list;
265 tcph = (struct tcphdr *)iph + iph->ihl*4;
267 if (ethh->h_proto == cpu_to_be16(ETH_P_IP)) {
268 if (qcb->qos_list_cnt && !qos_free_list.cnt) {
269 entry = alloc_qos_entry();
272 netdev_dbg(dev, "qcb->qos_list_cnt=%d\n",
276 spin_lock_irqsave(&qcb->qos_lock, flags);
277 if (qcb->qos_list_cnt) {
278 index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
280 index = qcb->qos_null_idx;
283 entry = alloc_qos_entry();
288 list_add_tail(&entry->list, &qcb->qos_list[index]);
289 extract_qos_list(nic, &send_list);
290 spin_unlock_irqrestore(&qcb->qos_lock, flags);
291 send_qos_list(nic, &send_list);
294 spin_unlock_irqrestore(&qcb->qos_lock, flags);
296 free_qos_entry(entry);
299 ret = gdm_wimax_send_tx(skb, dev);
304 static int get_csr(struct qos_cb_s *qcb, u32 sfid, int mode)
308 for (i = 0; i < qcb->qos_list_cnt; i++) {
309 if (qcb->csr[i].sfid == sfid)
314 for (i = 0; i < QOS_MAX; i++) {
315 if (!qcb->csr[i].enabled) {
316 qcb->csr[i].enabled = true;
325 #define QOS_CHANGE_DEL 0xFC
327 #define QOS_REPORT 0xFE
329 void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
331 struct nic *nic = nic_ptr;
335 struct qos_cb_s *qcb = &nic->qos;
336 struct qos_entry_s *entry, *n;
337 struct list_head send_list;
338 struct list_head free_list;
341 sub_cmd_evt = (u8)buf[4];
343 if (sub_cmd_evt == QOS_REPORT) {
344 spin_lock_irqsave(&qcb->qos_lock, flags);
345 for (i = 0; i < qcb->qos_list_cnt; i++) {
346 sfid = ((buf[(i*5)+6]<<24)&0xff000000);
347 sfid += ((buf[(i*5)+7]<<16)&0xff0000);
348 sfid += ((buf[(i*5)+8]<<8)&0xff00);
349 sfid += (buf[(i*5)+9]);
350 index = get_csr(qcb, sfid, 0);
352 spin_unlock_irqrestore(&qcb->qos_lock, flags);
353 netdev_err(nic->netdev, "QoS ERROR: No SF\n");
356 qcb->csr[index].qos_buf_count = buf[(i*5)+10];
359 extract_qos_list(nic, &send_list);
360 spin_unlock_irqrestore(&qcb->qos_lock, flags);
361 send_qos_list(nic, &send_list);
365 /* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANG_DEL */
367 sfid = ((buf[pos++]<<24)&0xff000000);
368 sfid += ((buf[pos++]<<16)&0xff0000);
369 sfid += ((buf[pos++]<<8)&0xff00);
370 sfid += (buf[pos++]);
372 index = get_csr(qcb, sfid, 1);
374 netdev_err(nic->netdev,
375 "QoS ERROR: csr Update Error / Wrong index (%d)\n",
380 if (sub_cmd_evt == QOS_ADD) {
381 netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n",
384 spin_lock_irqsave(&qcb->qos_lock, flags);
385 qcb->csr[index].sfid = sfid;
386 qcb->csr[index].classifier_rule_en = ((buf[pos++]<<8)&0xff00);
387 qcb->csr[index].classifier_rule_en += buf[pos++];
388 if (qcb->csr[index].classifier_rule_en == 0)
389 qcb->qos_null_idx = index;
390 qcb->csr[index].ip2s_mask = buf[pos++];
391 qcb->csr[index].ip2s_lo = buf[pos++];
392 qcb->csr[index].ip2s_hi = buf[pos++];
393 qcb->csr[index].protocol = buf[pos++];
394 qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
395 qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
396 qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
397 qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
398 qcb->csr[index].ipsrc_addr[0] = buf[pos++];
399 qcb->csr[index].ipsrc_addr[1] = buf[pos++];
400 qcb->csr[index].ipsrc_addr[2] = buf[pos++];
401 qcb->csr[index].ipsrc_addr[3] = buf[pos++];
402 qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
403 qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
404 qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
405 qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
406 qcb->csr[index].ipdst_addr[0] = buf[pos++];
407 qcb->csr[index].ipdst_addr[1] = buf[pos++];
408 qcb->csr[index].ipdst_addr[2] = buf[pos++];
409 qcb->csr[index].ipdst_addr[3] = buf[pos++];
410 qcb->csr[index].srcport_lo = ((buf[pos++]<<8)&0xff00);
411 qcb->csr[index].srcport_lo += buf[pos++];
412 qcb->csr[index].srcport_hi = ((buf[pos++]<<8)&0xff00);
413 qcb->csr[index].srcport_hi += buf[pos++];
414 qcb->csr[index].dstport_lo = ((buf[pos++]<<8)&0xff00);
415 qcb->csr[index].dstport_lo += buf[pos++];
416 qcb->csr[index].dstport_hi = ((buf[pos++]<<8)&0xff00);
417 qcb->csr[index].dstport_hi += buf[pos++];
419 qcb->qos_limit_size = 254/qcb->qos_list_cnt;
420 spin_unlock_irqrestore(&qcb->qos_lock, flags);
421 } else if (sub_cmd_evt == QOS_CHANGE_DEL) {
422 netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
425 INIT_LIST_HEAD(&free_list);
427 spin_lock_irqsave(&qcb->qos_lock, flags);
428 qcb->csr[index].enabled = false;
430 qcb->qos_limit_size = 254/qcb->qos_list_cnt;
432 list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
434 list_move_tail(&entry->list, &free_list);
436 spin_unlock_irqrestore(&qcb->qos_lock, flags);
437 free_qos_entry_list(&free_list);