/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/etherdevice.h>
#include <asm/byteorder.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>

#include "gdm_wimax.h"
#include "hci.h"
#include "gdm_qos.h"
25 #define B2H(x) __be16_to_cpu(x)
28 #define dprintk(fmt, args ...) printk(KERN_DEBUG "[QoS] " fmt, ## args)
30 #define wprintk(fmt, args ...) \
31 printk(KERN_WARNING "[QoS WARNING] " fmt, ## args)
33 #define eprintk(fmt, args ...) printk(KERN_ERR "[QoS ERROR] " fmt, ## args)
36 #define MAX_FREE_LIST_CNT 32
38 struct list_head head
;
43 static void init_qos_entry_list(void)
45 qos_free_list
.cnt
= 0;
46 INIT_LIST_HEAD(&qos_free_list
.head
);
47 spin_lock_init(&qos_free_list
.lock
);
50 static void *alloc_qos_entry(void)
52 struct qos_entry_s
*entry
;
55 spin_lock_irqsave(&qos_free_list
.lock
, flags
);
56 if (qos_free_list
.cnt
) {
57 entry
= list_entry(qos_free_list
.head
.prev
, struct qos_entry_s
,
59 list_del(&entry
->list
);
61 spin_unlock_irqrestore(&qos_free_list
.lock
, flags
);
64 spin_unlock_irqrestore(&qos_free_list
.lock
, flags
);
66 entry
= kmalloc(sizeof(struct qos_entry_s
), GFP_ATOMIC
);
70 static void free_qos_entry(void *entry
)
72 struct qos_entry_s
*qentry
= (struct qos_entry_s
*) entry
;
75 spin_lock_irqsave(&qos_free_list
.lock
, flags
);
76 if (qos_free_list
.cnt
< MAX_FREE_LIST_CNT
) {
77 list_add(&qentry
->list
, &qos_free_list
.head
);
79 spin_unlock_irqrestore(&qos_free_list
.lock
, flags
);
82 spin_unlock_irqrestore(&qos_free_list
.lock
, flags
);
87 static void free_qos_entry_list(struct list_head
*free_list
)
89 struct qos_entry_s
*entry
, *n
;
92 list_for_each_entry_safe(entry
, n
, free_list
, list
) {
93 list_del(&entry
->list
);
98 dprintk("%s: total_free_cnt=%d\n", __func__
, total_free
);
101 void gdm_qos_init(void *nic_ptr
)
103 struct nic
*nic
= nic_ptr
;
104 struct qos_cb_s
*qcb
= &nic
->qos
;
107 for (i
= 0 ; i
< QOS_MAX
; i
++) {
108 INIT_LIST_HEAD(&qcb
->qos_list
[i
]);
109 qcb
->csr
[i
].qos_buf_count
= 0;
110 qcb
->csr
[i
].enabled
= 0;
113 qcb
->qos_list_cnt
= 0;
114 qcb
->qos_null_idx
= QOS_MAX
-1;
115 qcb
->qos_limit_size
= 255;
117 spin_lock_init(&qcb
->qos_lock
);
119 init_qos_entry_list();
122 void gdm_qos_release_list(void *nic_ptr
)
124 struct nic
*nic
= nic_ptr
;
125 struct qos_cb_s
*qcb
= &nic
->qos
;
127 struct qos_entry_s
*entry
, *n
;
128 struct list_head free_list
;
131 INIT_LIST_HEAD(&free_list
);
133 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
135 for (i
= 0; i
< QOS_MAX
; i
++) {
136 qcb
->csr
[i
].qos_buf_count
= 0;
137 qcb
->csr
[i
].enabled
= 0;
140 qcb
->qos_list_cnt
= 0;
141 qcb
->qos_null_idx
= QOS_MAX
-1;
143 for (i
= 0; i
< QOS_MAX
; i
++) {
144 list_for_each_entry_safe(entry
, n
, &qcb
->qos_list
[i
], list
) {
145 list_move_tail(&entry
->list
, &free_list
);
148 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
149 free_qos_entry_list(&free_list
);
152 static u32
chk_ipv4_rule(struct gdm_wimax_csr_s
*csr
, u8
*Stream
, u8
*port
)
156 if (csr
->classifier_rule_en
&IPTYPEOFSERVICE
) {
157 if (((Stream
[1] & csr
->ip2s_mask
) < csr
->ip2s_lo
) ||
158 ((Stream
[1] & csr
->ip2s_mask
) > csr
->ip2s_hi
))
162 if (csr
->classifier_rule_en
&PROTOCOL
) {
163 if (Stream
[9] != csr
->protocol
)
167 if (csr
->classifier_rule_en
&IPMASKEDSRCADDRESS
) {
168 for (i
= 0; i
< 4; i
++) {
169 if ((Stream
[12 + i
] & csr
->ipsrc_addrmask
[i
]) !=
170 (csr
->ipsrc_addr
[i
] & csr
->ipsrc_addrmask
[i
]))
175 if (csr
->classifier_rule_en
&IPMASKEDDSTADDRESS
) {
176 for (i
= 0; i
< 4; i
++) {
177 if ((Stream
[16 + i
] & csr
->ipdst_addrmask
[i
]) !=
178 (csr
->ipdst_addr
[i
] & csr
->ipdst_addrmask
[i
]))
183 if (csr
->classifier_rule_en
&PROTOCOLSRCPORTRANGE
) {
184 i
= ((port
[0]<<8)&0xff00)+port
[1];
185 if ((i
< csr
->srcport_lo
) || (i
> csr
->srcport_hi
))
189 if (csr
->classifier_rule_en
&PROTOCOLDSTPORTRANGE
) {
190 i
= ((port
[2]<<8)&0xff00)+port
[3];
191 if ((i
< csr
->dstport_lo
) || (i
> csr
->dstport_hi
))
198 static u32
get_qos_index(struct nic
*nic
, u8
*iph
, u8
*tcpudph
)
200 u32 IP_Ver
, Header_Len
, i
;
201 struct qos_cb_s
*qcb
= &nic
->qos
;
203 if (iph
== NULL
|| tcpudph
== NULL
)
206 IP_Ver
= (iph
[0]>>4)&0xf;
207 Header_Len
= iph
[0]&0xf;
210 for (i
= 0; i
< QOS_MAX
; i
++) {
211 if (qcb
->csr
[i
].enabled
) {
212 if (qcb
->csr
[i
].classifier_rule_en
) {
213 if (chk_ipv4_rule(&qcb
->csr
[i
], iph
,
224 static u32
extract_qos_list(struct nic
*nic
, struct list_head
*head
)
226 struct qos_cb_s
*qcb
= &nic
->qos
;
227 struct qos_entry_s
*entry
;
230 INIT_LIST_HEAD(head
);
232 for (i
= 0; i
< QOS_MAX
; i
++) {
233 if (qcb
->csr
[i
].enabled
) {
234 if (qcb
->csr
[i
].qos_buf_count
< qcb
->qos_limit_size
) {
235 if (!list_empty(&qcb
->qos_list
[i
])) {
237 qcb
->qos_list
[i
].prev
,
238 struct qos_entry_s
, list
);
239 list_move_tail(&entry
->list
, head
);
240 qcb
->csr
[i
].qos_buf_count
++;
242 if (!list_empty(&qcb
->qos_list
[i
]))
243 wprintk("QoS Index(%d) is piled!!\n", i
);
252 static void send_qos_list(struct nic
*nic
, struct list_head
*head
)
254 struct qos_entry_s
*entry
, *n
;
256 list_for_each_entry_safe(entry
, n
, head
, list
) {
257 list_del(&entry
->list
);
258 free_qos_entry(entry
);
259 gdm_wimax_send_tx(entry
->skb
, entry
->dev
);
263 int gdm_qos_send_hci_pkt(struct sk_buff
*skb
, struct net_device
*dev
)
265 struct nic
*nic
= netdev_priv(dev
);
267 struct qos_cb_s
*qcb
= &nic
->qos
;
269 struct ethhdr
*ethh
= (struct ethhdr
*) (skb
->data
+ HCI_HEADER_SIZE
);
270 struct iphdr
*iph
= (struct iphdr
*) ((char *) ethh
+ ETH_HLEN
);
272 struct qos_entry_s
*entry
= NULL
;
273 struct list_head send_list
;
276 tcph
= (struct tcphdr
*) iph
+ iph
->ihl
*4;
278 if (B2H(ethh
->h_proto
) == ETH_P_IP
) {
279 if (qcb
->qos_list_cnt
&& !qos_free_list
.cnt
) {
280 entry
= alloc_qos_entry();
283 dprintk("qcb->qos_list_cnt=%d\n", qcb
->qos_list_cnt
);
286 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
287 if (qcb
->qos_list_cnt
) {
288 index
= get_qos_index(nic
, (u8
*)iph
, (u8
*) tcph
);
290 index
= qcb
->qos_null_idx
;
293 entry
= alloc_qos_entry();
298 list_add_tail(&entry
->list
, &qcb
->qos_list
[index
]);
299 extract_qos_list(nic
, &send_list
);
300 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
301 send_qos_list(nic
, &send_list
);
304 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
306 free_qos_entry(entry
);
309 ret
= gdm_wimax_send_tx(skb
, dev
);
314 static u32
get_csr(struct qos_cb_s
*qcb
, u32 SFID
, int mode
)
318 for (i
= 0; i
< qcb
->qos_list_cnt
; i
++) {
319 if (qcb
->csr
[i
].SFID
== SFID
)
324 for (i
= 0; i
< QOS_MAX
; i
++) {
325 if (qcb
->csr
[i
].enabled
== 0) {
326 qcb
->csr
[i
].enabled
= 1;
/* HCI sub-command/event codes carried in buf[4] of a QoS HCI packet. */
#define QOS_CHANGE_DEL	0xFC
#define QOS_ADD		0xFD	/* referenced below but missing from the garbled source */
#define QOS_REPORT	0xFE
339 void gdm_recv_qos_hci_packet(void *nic_ptr
, u8
*buf
, int size
)
341 struct nic
*nic
= nic_ptr
;
342 u32 i
, SFID
, index
, pos
;
345 struct qos_cb_s
*qcb
= &nic
->qos
;
346 struct qos_entry_s
*entry
, *n
;
347 struct list_head send_list
;
348 struct list_head free_list
;
351 subCmdEvt
= (u8
)buf
[4];
353 if (subCmdEvt
== QOS_REPORT
) {
356 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
357 for (i
= 0; i
< qcb
->qos_list_cnt
; i
++) {
358 SFID
= ((buf
[(i
*5)+6]<<24)&0xff000000);
359 SFID
+= ((buf
[(i
*5)+7]<<16)&0xff0000);
360 SFID
+= ((buf
[(i
*5)+8]<<8)&0xff00);
361 SFID
+= (buf
[(i
*5)+9]);
362 index
= get_csr(qcb
, SFID
, 0);
364 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
365 eprintk("QoS ERROR: No SF\n");
368 qcb
->csr
[index
].qos_buf_count
= buf
[(i
*5)+10];
371 extract_qos_list(nic
, &send_list
);
372 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
373 send_qos_list(nic
, &send_list
);
375 } else if (subCmdEvt
== QOS_ADD
) {
377 len
= (u8
)buf
[pos
++];
379 SFID
= ((buf
[pos
++]<<24)&0xff000000);
380 SFID
+= ((buf
[pos
++]<<16)&0xff0000);
381 SFID
+= ((buf
[pos
++]<<8)&0xff00);
382 SFID
+= (buf
[pos
++]);
384 index
= get_csr(qcb
, SFID
, 1);
386 eprintk("QoS ERROR: csr Update Error\n");
390 dprintk("QOS_ADD SFID = 0x%x, index=%d\n", SFID
, index
);
392 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
393 qcb
->csr
[index
].SFID
= SFID
;
394 qcb
->csr
[index
].classifier_rule_en
= ((buf
[pos
++]<<8)&0xff00);
395 qcb
->csr
[index
].classifier_rule_en
+= buf
[pos
++];
396 if (qcb
->csr
[index
].classifier_rule_en
== 0)
397 qcb
->qos_null_idx
= index
;
398 qcb
->csr
[index
].ip2s_mask
= buf
[pos
++];
399 qcb
->csr
[index
].ip2s_lo
= buf
[pos
++];
400 qcb
->csr
[index
].ip2s_hi
= buf
[pos
++];
401 qcb
->csr
[index
].protocol
= buf
[pos
++];
402 qcb
->csr
[index
].ipsrc_addrmask
[0] = buf
[pos
++];
403 qcb
->csr
[index
].ipsrc_addrmask
[1] = buf
[pos
++];
404 qcb
->csr
[index
].ipsrc_addrmask
[2] = buf
[pos
++];
405 qcb
->csr
[index
].ipsrc_addrmask
[3] = buf
[pos
++];
406 qcb
->csr
[index
].ipsrc_addr
[0] = buf
[pos
++];
407 qcb
->csr
[index
].ipsrc_addr
[1] = buf
[pos
++];
408 qcb
->csr
[index
].ipsrc_addr
[2] = buf
[pos
++];
409 qcb
->csr
[index
].ipsrc_addr
[3] = buf
[pos
++];
410 qcb
->csr
[index
].ipdst_addrmask
[0] = buf
[pos
++];
411 qcb
->csr
[index
].ipdst_addrmask
[1] = buf
[pos
++];
412 qcb
->csr
[index
].ipdst_addrmask
[2] = buf
[pos
++];
413 qcb
->csr
[index
].ipdst_addrmask
[3] = buf
[pos
++];
414 qcb
->csr
[index
].ipdst_addr
[0] = buf
[pos
++];
415 qcb
->csr
[index
].ipdst_addr
[1] = buf
[pos
++];
416 qcb
->csr
[index
].ipdst_addr
[2] = buf
[pos
++];
417 qcb
->csr
[index
].ipdst_addr
[3] = buf
[pos
++];
418 qcb
->csr
[index
].srcport_lo
= ((buf
[pos
++]<<8)&0xff00);
419 qcb
->csr
[index
].srcport_lo
+= buf
[pos
++];
420 qcb
->csr
[index
].srcport_hi
= ((buf
[pos
++]<<8)&0xff00);
421 qcb
->csr
[index
].srcport_hi
+= buf
[pos
++];
422 qcb
->csr
[index
].dstport_lo
= ((buf
[pos
++]<<8)&0xff00);
423 qcb
->csr
[index
].dstport_lo
+= buf
[pos
++];
424 qcb
->csr
[index
].dstport_hi
= ((buf
[pos
++]<<8)&0xff00);
425 qcb
->csr
[index
].dstport_hi
+= buf
[pos
++];
427 qcb
->qos_limit_size
= 254/qcb
->qos_list_cnt
;
428 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
429 } else if (subCmdEvt
== QOS_CHANGE_DEL
) {
431 len
= (u8
)buf
[pos
++];
432 SFID
= ((buf
[pos
++]<<24)&0xff000000);
433 SFID
+= ((buf
[pos
++]<<16)&0xff0000);
434 SFID
+= ((buf
[pos
++]<<8)&0xff00);
435 SFID
+= (buf
[pos
++]);
436 index
= get_csr(qcb
, SFID
, 1);
438 eprintk("QoS ERROR: Wrong index(%d)\n", index
);
442 dprintk("QOS_CHANGE_DEL SFID = 0x%x, index=%d\n", SFID
, index
);
444 INIT_LIST_HEAD(&free_list
);
446 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
447 qcb
->csr
[index
].enabled
= 0;
449 qcb
->qos_limit_size
= 254/qcb
->qos_list_cnt
;
451 list_for_each_entry_safe(entry
, n
, &qcb
->qos_list
[index
],
453 list_move_tail(&entry
->list
, &free_list
);
455 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
456 free_qos_entry_list(&free_list
);