/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/etherdevice.h>
#include <asm/byteorder.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>

#include "gdm_wimax.h"
#include "hci.h"
#include "gdm_qos.h"

#define B2H(x)	__be16_to_cpu(x)

#undef dprintk
#define dprintk(fmt, args...)	printk(KERN_DEBUG "[QoS] " fmt, ## args)
#undef wprintk
#define wprintk(fmt, args...)	\
	printk(KERN_WARNING "[QoS WARNING] " fmt, ## args)
#undef eprintk
#define eprintk(fmt, args...)	printk(KERN_ERR "[QoS ERROR] " fmt, ## args)

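/*
 * Small cache of free qos_entry_s structures.  Up to MAX_FREE_LIST_CNT
 * released entries are kept here and recycled instead of being handed
 * back to the slab allocator on every packet.
 */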
#define MAX_FREE_LIST_CNT	32
static struct {
	struct list_head head;
	int cnt;
	spinlock_t lock;
} qos_free_list;

static void init_qos_entry_list(void)
{
	qos_free_list.cnt = 0;
	INIT_LIST_HEAD(&qos_free_list.head);
	spin_lock_init(&qos_free_list.lock);
}

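/*
 * Take an entry from the cache when one is available; otherwise fall
 * back to an atomic allocation.  May return NULL.
 */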
static void *alloc_qos_entry(void)
{
	struct qos_entry_s *entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt) {
		entry = list_entry(qos_free_list.head.prev,
				   struct qos_entry_s, list);
		list_del(&entry->list);
		qos_free_list.cnt--;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return entry;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	return kmalloc(sizeof(*entry), GFP_ATOMIC);
}

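/*
 * Return an entry to the cache, or kfree() it once the cache is full.
 */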
static void free_qos_entry(void *entry)
{
	struct qos_entry_s *qentry = entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
		list_add(&qentry->list, &qos_free_list.head);
		qos_free_list.cnt++;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	kfree(entry);
}

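/*
 * Release every entry on @free_list straight to the allocator,
 * bypassing the cache.
 */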
static void free_qos_entry_list(struct list_head *free_list)
{
	struct qos_entry_s *entry, *n;
	int total_free = 0;

	list_for_each_entry_safe(entry, n, free_list, list) {
		list_del(&entry->list);
		kfree(entry);
		total_free++;
	}

	dprintk("%s: total_free_cnt=%d\n", __func__, total_free);
}

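/*
 * Reset every per-service-flow slot and initialize the global
 * entry cache.
 */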
void gdm_qos_init(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	int i;

	for (i = 0; i < QOS_MAX; i++) {
		INIT_LIST_HEAD(&qcb->qos_list[i]);
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = 0;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;
	qcb->qos_limit_size = 255;

	spin_lock_init(&qcb->qos_lock);

	init_qos_entry_list();
}

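/*
 * Disable all service flows and free every packet still queued
 * for QoS scheduling.
 */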
void gdm_qos_release_list(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	unsigned long flags;
	struct qos_entry_s *entry, *n;
	struct list_head free_list;
	int i;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&qcb->qos_lock, flags);

	for (i = 0; i < QOS_MAX; i++) {
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = 0;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;

	for (i = 0; i < QOS_MAX; i++) {
		list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list)
			list_move_tail(&entry->list, &free_list);
	}
	spin_unlock_irqrestore(&qcb->qos_lock, flags);

	free_qos_entry_list(&free_list);
}

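/*
 * Match an IPv4 header (@stream) and its TCP/UDP port block (@port)
 * against the classifier rules enabled in @csr: type of service,
 * protocol, masked source/destination address and source/destination
 * port ranges.  Returns 0 on a match, 1 on any mismatch.
 */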
static u32 chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port)
{
	int i;

	if (csr->classifier_rule_en & IPTYPEOFSERVICE) {
		if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
		    ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOL) {
		if (stream[9] != csr->protocol)
			return 1;
	}

	if (csr->classifier_rule_en & IPMASKEDSRCADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((stream[12 + i] & csr->ipsrc_addrmask[i]) !=
			    (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & IPMASKEDDSTADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((stream[16 + i] & csr->ipdst_addrmask[i]) !=
			    (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & PROTOCOLSRCPORTRANGE) {
		i = ((port[0] << 8) & 0xff00) + port[1];
		if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOLDSTPORTRANGE) {
		i = ((port[2] << 8) & 0xff00) + port[3];
		if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
			return 1;
	}

	return 0;
}

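/*
 * Return the index of the first enabled CSR whose classifier matches
 * the packet, or -1 if the packet is not IPv4 or nothing matches.
 */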
static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
{
	u32 ip_ver;
	int i;
	struct qos_cb_s *qcb = &nic->qos;

	if (iph == NULL || tcpudph == NULL)
		return -1;

	ip_ver = (iph[0] >> 4) & 0xf;
	if (ip_ver != 4)
		return -1;

	for (i = 0; i < QOS_MAX; i++) {
		if (!qcb->csr[i].enabled || !qcb->csr[i].classifier_rule_en)
			continue;
		if (chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
			return i;
	}

	return -1;
}

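/*
 * Move at most one pending packet per enabled service flow onto @head,
 * provided the flow's outstanding buffer count is still below
 * qos_limit_size.  The caller must hold qcb->qos_lock.
 */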
static void extract_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry;
	int i;

	INIT_LIST_HEAD(head);

	for (i = 0; i < QOS_MAX; i++) {
		if (!qcb->csr[i].enabled ||
		    qcb->csr[i].qos_buf_count >= qcb->qos_limit_size ||
		    list_empty(&qcb->qos_list[i]))
			continue;

		entry = list_entry(qcb->qos_list[i].prev,
				   struct qos_entry_s, list);
		list_move_tail(&entry->list, head);
		qcb->csr[i].qos_buf_count++;

		if (!list_empty(&qcb->qos_list[i]))
			wprintk("QoS index(%d) is piling up\n", i);
	}
}

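/*
 * Transmit every entry queued on @head and recycle the entries.
 */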
static void send_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_entry_s *entry, *n;

	list_for_each_entry_safe(entry, n, head, list) {
		list_del(&entry->list);
		/* transmit before recycling: entry->skb and entry->dev
		 * must not be touched after free_qos_entry() */
		gdm_wimax_send_tx(entry->skb, entry->dev);
		free_qos_entry(entry);
	}
}

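/*
 * TX path: IPv4 packets are queued on the matching service-flow list
 * and drained through the QoS scheduler while any service flow is
 * active; everything else is transmitted directly.
 */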
int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
{
	struct nic *nic = netdev_priv(dev);
	int index;
	struct qos_cb_s *qcb = &nic->qos;
	unsigned long flags;
	struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
	struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
	struct tcphdr *tcph;
	struct qos_entry_s *entry = NULL;
	struct list_head send_list;
	int ret = 0;

	/* the TCP/UDP header starts iph->ihl * 4 bytes into the IP
	 * header; advance in bytes, not in tcphdr-sized steps */
	tcph = (struct tcphdr *)((char *)iph + iph->ihl * 4);

	if (B2H(ethh->h_proto) == ETH_P_IP) {
		if (qcb->qos_list_cnt && !qos_free_list.cnt) {
			entry = alloc_qos_entry();
			if (entry) {
				entry->skb = skb;
				entry->dev = dev;
				dprintk("qcb->qos_list_cnt=%d\n",
					qcb->qos_list_cnt);
			}
		}

		spin_lock_irqsave(&qcb->qos_lock, flags);
		if (qcb->qos_list_cnt) {
			index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
			if (index == -1)
				index = qcb->qos_null_idx;

			if (!entry) {
				entry = alloc_qos_entry();
				if (!entry) {
					/* cannot queue the packet; send
					 * it out directly instead */
					spin_unlock_irqrestore(&qcb->qos_lock,
							       flags);
					goto send_out;
				}
				entry->skb = skb;
				entry->dev = dev;
			}

			list_add_tail(&entry->list, &qcb->qos_list[index]);
			extract_qos_list(nic, &send_list);
			spin_unlock_irqrestore(&qcb->qos_lock, flags);
			send_qos_list(nic, &send_list);
			goto out;
		}
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		if (entry)
			free_qos_entry(entry);
	}

send_out:
	ret = gdm_wimax_send_tx(skb, dev);
out:
	return ret;
}

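/*
 * Find the CSR slot registered for @SFID.  When @mode is non-zero and
 * the SFID is unknown, claim the first disabled slot for it.  Returns
 * the slot index or -1.
 */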
static int get_csr(struct qos_cb_s *qcb, u32 SFID, int mode)
{
	int i;

	for (i = 0; i < qcb->qos_list_cnt; i++) {
		if (qcb->csr[i].SFID == SFID)
			return i;
	}

	if (mode) {
		for (i = 0; i < QOS_MAX; i++) {
			if (qcb->csr[i].enabled == 0) {
				qcb->csr[i].enabled = 1;
				qcb->qos_list_cnt++;
				return i;
			}
		}
	}
	return -1;
}

#define QOS_CHANGE_DEL	0xFC
#define QOS_ADD		0xFD
#define QOS_REPORT	0xFE

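/*
 * Handle a QoS HCI message from the device.  Layout of @buf, as parsed
 * below: buf[4] is the sub-command (QOS_REPORT, QOS_ADD or
 * QOS_CHANGE_DEL), buf[5] a length byte, followed by one or more
 * big-endian SFIDs and, for QOS_ADD, the classifier rule fields.
 */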
void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
{
	struct nic *nic = nic_ptr;
	int index;
	u32 i, SFID, pos;
	u8 sub_cmd_evt;
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry, *n;
	struct list_head send_list;
	struct list_head free_list;
	unsigned long flags;

	sub_cmd_evt = (u8)buf[4];

	if (sub_cmd_evt == QOS_REPORT) {
		spin_lock_irqsave(&qcb->qos_lock, flags);
		for (i = 0; i < qcb->qos_list_cnt; i++) {
			SFID = ((buf[(i * 5) + 6] << 24) & 0xff000000);
			SFID += ((buf[(i * 5) + 7] << 16) & 0xff0000);
			SFID += ((buf[(i * 5) + 8] << 8) & 0xff00);
			SFID += buf[(i * 5) + 9];
			index = get_csr(qcb, SFID, 0);
			if (index == -1) {
				spin_unlock_irqrestore(&qcb->qos_lock, flags);
				eprintk("no matching SF\n");
				return;
			}
			qcb->csr[index].qos_buf_count = buf[(i * 5) + 10];
		}

		extract_qos_list(nic, &send_list);
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		send_qos_list(nic, &send_list);
		return;
	} else if (sub_cmd_evt == QOS_ADD) {
		/* buf[5] is a length byte; the SFID starts at buf[6] */
		pos = 6;

		SFID = ((buf[pos++] << 24) & 0xff000000);
		SFID += ((buf[pos++] << 16) & 0xff0000);
		SFID += ((buf[pos++] << 8) & 0xff00);
		SFID += buf[pos++];

		index = get_csr(qcb, SFID, 1);
		if (index == -1) {
			eprintk("csr update failed\n");
			return;
		}

		dprintk("QOS_ADD SFID=0x%x, index=%d\n", SFID, index);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].SFID = SFID;
		qcb->csr[index].classifier_rule_en =
					((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].classifier_rule_en += buf[pos++];
		if (qcb->csr[index].classifier_rule_en == 0)
			qcb->qos_null_idx = index;
		qcb->csr[index].ip2s_mask = buf[pos++];
		qcb->csr[index].ip2s_lo = buf[pos++];
		qcb->csr[index].ip2s_hi = buf[pos++];
		qcb->csr[index].protocol = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
		qcb->csr[index].ipsrc_addr[0] = buf[pos++];
		qcb->csr[index].ipsrc_addr[1] = buf[pos++];
		qcb->csr[index].ipsrc_addr[2] = buf[pos++];
		qcb->csr[index].ipsrc_addr[3] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
		qcb->csr[index].ipdst_addr[0] = buf[pos++];
		qcb->csr[index].ipdst_addr[1] = buf[pos++];
		qcb->csr[index].ipdst_addr[2] = buf[pos++];
		qcb->csr[index].ipdst_addr[3] = buf[pos++];
		qcb->csr[index].srcport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_lo += buf[pos++];
		qcb->csr[index].srcport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_hi += buf[pos++];
		qcb->csr[index].dstport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_lo += buf[pos++];
		qcb->csr[index].dstport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_hi += buf[pos++];

		qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
	} else if (sub_cmd_evt == QOS_CHANGE_DEL) {
		/* buf[5] is a length byte; the SFID starts at buf[6] */
		pos = 6;
		SFID = ((buf[pos++] << 24) & 0xff000000);
		SFID += ((buf[pos++] << 16) & 0xff0000);
		SFID += ((buf[pos++] << 8) & 0xff00);
		SFID += buf[pos++];
		index = get_csr(qcb, SFID, 1);
		if (index == -1) {
			eprintk("wrong index(%d)\n", index);
			return;
		}

		dprintk("QOS_CHANGE_DEL SFID=0x%x, index=%d\n", SFID, index);

		INIT_LIST_HEAD(&free_list);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].enabled = 0;
		qcb->qos_list_cnt--;
		/* guard against a divide by zero when the last SF goes */
		if (qcb->qos_list_cnt)
			qcb->qos_limit_size = 254 / qcb->qos_list_cnt;

		list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
					 list)
			list_move_tail(&entry->list, &free_list);
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		free_qos_entry_list(&free_list);
	}
}