drivers/staging/gdm72xx/gdm_qos.c

/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <asm/byteorder.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "gdm_wimax.h"
#include "hci.h"
#include "gdm_qos.h"

#define B2H(x) __be16_to_cpu(x)

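/*
 * Cache of released qos_entry_s objects. free_qos_entry() parks up to
 * MAX_FREE_LIST_CNT entries here so that alloc_qos_entry(), which may run
 * in atomic context, can usually avoid a fresh GFP_ATOMIC allocation.
 */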
#define MAX_FREE_LIST_CNT 32
static struct {
	struct list_head head;
	int cnt;
	spinlock_t lock;
} qos_free_list;

static void init_qos_entry_list(void)
{
	qos_free_list.cnt = 0;
	INIT_LIST_HEAD(&qos_free_list.head);
	spin_lock_init(&qos_free_list.lock);
}

static void *alloc_qos_entry(void)
{
	struct qos_entry_s *entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt) {
		entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
				   list);
		list_del(&entry->list);
		qos_free_list.cnt--;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return entry;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	return kmalloc(sizeof(*entry), GFP_ATOMIC);
}

static void free_qos_entry(void *entry)
{
	struct qos_entry_s *qentry = (struct qos_entry_s *)entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
		list_add(&qentry->list, &qos_free_list.head);
		qos_free_list.cnt++;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	kfree(entry);
}

static void free_qos_entry_list(struct list_head *free_list)
{
	struct qos_entry_s *entry, *n;
	int total_free = 0;

	list_for_each_entry_safe(entry, n, free_list, list) {
		list_del(&entry->list);
		kfree(entry);
		total_free++;
	}

	pr_debug("%s: total_free_cnt=%d\n", __func__, total_free);
}

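/*
 * Reset every classifier slot and the per-queue bookkeeping to a clean
 * state: no service flows, all queues empty, and the null (catch-all)
 * queue index pointing at the last slot.
 */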
void gdm_qos_init(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	int i;

	for (i = 0; i < QOS_MAX; i++) {
		INIT_LIST_HEAD(&qcb->qos_list[i]);
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = 0;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;
	qcb->qos_limit_size = 255;

	spin_lock_init(&qcb->qos_lock);

	init_qos_entry_list();
}

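/*
 * Disable all service flows and drop every queued entry. Entries are
 * first moved to a private list under qos_lock and only freed after the
 * lock is released, so kfree() is never called with the spinlock held.
 */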
void gdm_qos_release_list(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	unsigned long flags;
	struct qos_entry_s *entry, *n;
	struct list_head free_list;
	int i;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&qcb->qos_lock, flags);

	for (i = 0; i < QOS_MAX; i++) {
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = 0;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;

	for (i = 0; i < QOS_MAX; i++) {
		list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list)
			list_move_tail(&entry->list, &free_list);
	}
	spin_unlock_irqrestore(&qcb->qos_lock, flags);
	free_qos_entry_list(&free_list);
}

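/*
 * Match one IPv4 header (and its TCP/UDP ports) against a single
 * classifier rule. Returns 0 on a match and 1 as soon as any enabled
 * sub-rule (ToS range, protocol, masked src/dst address, port ranges)
 * rejects the packet.
 */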
static u32 chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port)
{
	int i;

	if (csr->classifier_rule_en & IPTYPEOFSERVICE) {
		if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
		    ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOL) {
		if (stream[9] != csr->protocol)
			return 1;
	}

	if (csr->classifier_rule_en & IPMASKEDSRCADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((stream[12 + i] & csr->ipsrc_addrmask[i]) !=
			    (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & IPMASKEDDSTADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((stream[16 + i] & csr->ipdst_addrmask[i]) !=
			    (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & PROTOCOLSRCPORTRANGE) {
		i = ((port[0] << 8) & 0xff00) + port[1];
		if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOLDSTPORTRANGE) {
		i = ((port[2] << 8) & 0xff00) + port[3];
		if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
			return 1;
	}

	return 0;
}

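/*
 * Walk the enabled classifier rules and return the index of the first
 * one that matches this IPv4 packet, or -1 if nothing matches (the
 * caller then falls back to qos_null_idx).
 */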
static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
{
	int i;
	struct qos_cb_s *qcb = &nic->qos;

	if (iph == NULL || tcpudph == NULL)
		return -1;

	/* Only IPv4 packets are classified. */
	if (((iph[0] >> 4) & 0xf) != 4)
		return -1;

	for (i = 0; i < QOS_MAX; i++) {
		if (qcb->csr[i].enabled && qcb->csr[i].classifier_rule_en &&
		    chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
			return i;
	}

	return -1;
}

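/*
 * Move at most one pending entry per enabled queue onto @head, but only
 * while that queue's device-side buffer count (qos_buf_count) is below
 * qos_limit_size. The caller transmits the collected entries via
 * send_qos_list() after dropping qos_lock.
 */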
static void extract_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry;
	int i;

	INIT_LIST_HEAD(head);

	for (i = 0; i < QOS_MAX; i++) {
		if (!qcb->csr[i].enabled ||
		    qcb->csr[i].qos_buf_count >= qcb->qos_limit_size ||
		    list_empty(&qcb->qos_list[i]))
			continue;

		entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s,
				   list);
		list_move_tail(&entry->list, head);
		qcb->csr[i].qos_buf_count++;

		if (!list_empty(&qcb->qos_list[i]))
			netdev_warn(nic->netdev,
				    "index %d is still backlogged\n", i);
	}
}

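/*
 * Transmit every entry collected by extract_qos_list() and recycle the
 * entry structures. Each skb must be handed to gdm_wimax_send_tx()
 * before its entry is freed, since the entry owns the skb pointer.
 */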
static void send_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_entry_s *entry, *n;

	list_for_each_entry_safe(entry, n, head, list) {
		list_del(&entry->list);
		gdm_wimax_send_tx(entry->skb, entry->dev);
		free_qos_entry(entry);
	}
}

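/*
 * TX entry point. Non-IP frames, and all traffic while no service flow
 * is active, bypass QoS and go straight to gdm_wimax_send_tx(). IP
 * frames are classified, queued on the matching list, and flushed
 * according to the per-queue budgets.
 */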
int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
{
	struct nic *nic = netdev_priv(dev);
	int index;
	struct qos_cb_s *qcb = &nic->qos;
	unsigned long flags;
	struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
	struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
	struct tcphdr *tcph;
	struct qos_entry_s *entry = NULL;
	struct list_head send_list;
	int ret = 0;

	/* The L4 header starts ihl 32-bit words after the IP header. */
	tcph = (struct tcphdr *)((char *)iph + iph->ihl * 4);

	if (B2H(ethh->h_proto) == ETH_P_IP) {
		if (qcb->qos_list_cnt && !qos_free_list.cnt) {
			entry = alloc_qos_entry();
			if (entry) {
				entry->skb = skb;
				entry->dev = dev;
				netdev_dbg(dev, "qcb->qos_list_cnt=%d\n",
					   qcb->qos_list_cnt);
			}
		}

		spin_lock_irqsave(&qcb->qos_lock, flags);
		if (qcb->qos_list_cnt) {
			index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
			if (index == -1)
				index = qcb->qos_null_idx;

			if (!entry) {
				entry = alloc_qos_entry();
				if (!entry) {
					/* Out of memory: bypass QoS. */
					spin_unlock_irqrestore(&qcb->qos_lock,
							       flags);
					goto send_directly;
				}
				entry->skb = skb;
				entry->dev = dev;
			}

			list_add_tail(&entry->list, &qcb->qos_list[index]);
			extract_qos_list(nic, &send_list);
			spin_unlock_irqrestore(&qcb->qos_lock, flags);
			send_qos_list(nic, &send_list);
			goto out;
		}
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		if (entry)
			free_qos_entry(entry);
	}

send_directly:
	ret = gdm_wimax_send_tx(skb, dev);
out:
	return ret;
}

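/*
 * Look up the classifier slot for @SFID. With @mode set, a free slot is
 * claimed for an unknown SFID (QOS_ADD); with @mode clear the lookup is
 * read-only (QOS_REPORT). Returns the slot index or -1.
 */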
static int get_csr(struct qos_cb_s *qcb, u32 SFID, int mode)
{
	int i;

	for (i = 0; i < qcb->qos_list_cnt; i++) {
		if (qcb->csr[i].SFID == SFID)
			return i;
	}

	if (mode) {
		for (i = 0; i < QOS_MAX; i++) {
			if (qcb->csr[i].enabled == 0) {
				qcb->csr[i].enabled = 1;
				qcb->qos_list_cnt++;
				return i;
			}
		}
	}
	return -1;
}

#define QOS_CHANGE_DEL	0xFC
#define QOS_ADD		0xFD
#define QOS_REPORT	0xFE

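/*
 * Handle a WiMAX QoS HCI event. Byte 4 of @buf selects the sub-command:
 * QOS_REPORT carries one 5-byte record per active flow (4-byte SFID plus
 * the device-side buffer count), while QOS_ADD/QOS_CHANGE_DEL install or
 * remove a classifier rule for the SFID at offset 6.
 */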
void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
{
	struct nic *nic = nic_ptr;
	u32 SFID;
	int index, i, pos;
	u8 sub_cmd_evt;
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry, *n;
	struct list_head send_list;
	struct list_head free_list;
	unsigned long flags;

	sub_cmd_evt = buf[4];

	if (sub_cmd_evt == QOS_REPORT) {
		spin_lock_irqsave(&qcb->qos_lock, flags);
		for (i = 0; i < qcb->qos_list_cnt; i++) {
			SFID = ((buf[(i * 5) + 6] << 24) & 0xff000000);
			SFID += ((buf[(i * 5) + 7] << 16) & 0xff0000);
			SFID += ((buf[(i * 5) + 8] << 8) & 0xff00);
			SFID += (buf[(i * 5) + 9]);
			index = get_csr(qcb, SFID, 0);
			if (index == -1) {
				spin_unlock_irqrestore(&qcb->qos_lock, flags);
				netdev_err(nic->netdev, "QoS ERROR: No SF\n");
				return;
			}
			qcb->csr[index].qos_buf_count = buf[(i * 5) + 10];
		}

		extract_qos_list(nic, &send_list);
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		send_qos_list(nic, &send_list);
		return;
	}

	/* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANGE_DEL */
	pos = 6;
	SFID = ((buf[pos++] << 24) & 0xff000000);
	SFID += ((buf[pos++] << 16) & 0xff0000);
	SFID += ((buf[pos++] << 8) & 0xff00);
	SFID += (buf[pos++]);

	index = get_csr(qcb, SFID, 1);
	if (index == -1) {
		netdev_err(nic->netdev,
			   "QoS ERROR: csr update failed, wrong index (%d)\n",
			   index);
		return;
	}

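	/*
	 * QOS_ADD payload, following the 4-byte SFID: 2-byte classifier
	 * enable bitmap, ToS mask/low/high, protocol, 4-byte source
	 * address mask and address, 4-byte destination address mask and
	 * address, then 2-byte low/high source and destination port
	 * ranges, each multi-byte field arriving high byte first.
	 */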
	if (sub_cmd_evt == QOS_ADD) {
		netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n",
			   SFID, index);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].SFID = SFID;
		qcb->csr[index].classifier_rule_en = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].classifier_rule_en += buf[pos++];
		if (qcb->csr[index].classifier_rule_en == 0)
			qcb->qos_null_idx = index;
		qcb->csr[index].ip2s_mask = buf[pos++];
		qcb->csr[index].ip2s_lo = buf[pos++];
		qcb->csr[index].ip2s_hi = buf[pos++];
		qcb->csr[index].protocol = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
		qcb->csr[index].ipsrc_addr[0] = buf[pos++];
		qcb->csr[index].ipsrc_addr[1] = buf[pos++];
		qcb->csr[index].ipsrc_addr[2] = buf[pos++];
		qcb->csr[index].ipsrc_addr[3] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
		qcb->csr[index].ipdst_addr[0] = buf[pos++];
		qcb->csr[index].ipdst_addr[1] = buf[pos++];
		qcb->csr[index].ipdst_addr[2] = buf[pos++];
		qcb->csr[index].ipdst_addr[3] = buf[pos++];
		qcb->csr[index].srcport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_lo += buf[pos++];
		qcb->csr[index].srcport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_hi += buf[pos++];
		qcb->csr[index].dstport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_lo += buf[pos++];
		qcb->csr[index].dstport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_hi += buf[pos++];

		qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
	} else if (sub_cmd_evt == QOS_CHANGE_DEL) {
		netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
			   SFID, index);

		INIT_LIST_HEAD(&free_list);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].enabled = 0;
		qcb->qos_list_cnt--;
		/* Guard the divide: the last flow may just have gone away. */
		if (qcb->qos_list_cnt)
			qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
		else
			qcb->qos_limit_size = 255;

		list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
					 list) {
			list_move_tail(&entry->list, &free_list);
		}
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		free_qos_entry_list(&free_list);
	}
}