/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <asm/byteorder.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>

#include "gdm_wimax.h"
#include "hci.h"
#include "gdm_qos.h"
27 #define MAX_FREE_LIST_CNT 32
29 struct list_head head
;
34 static void init_qos_entry_list(void)
36 qos_free_list
.cnt
= 0;
37 INIT_LIST_HEAD(&qos_free_list
.head
);
38 spin_lock_init(&qos_free_list
.lock
);
41 static void *alloc_qos_entry(void)
43 struct qos_entry_s
*entry
;
46 spin_lock_irqsave(&qos_free_list
.lock
, flags
);
47 if (qos_free_list
.cnt
) {
48 entry
= list_entry(qos_free_list
.head
.prev
, struct qos_entry_s
,
50 list_del(&entry
->list
);
52 spin_unlock_irqrestore(&qos_free_list
.lock
, flags
);
55 spin_unlock_irqrestore(&qos_free_list
.lock
, flags
);
57 return kmalloc(sizeof(*entry
), GFP_ATOMIC
);
60 static void free_qos_entry(void *entry
)
62 struct qos_entry_s
*qentry
= (struct qos_entry_s
*)entry
;
65 spin_lock_irqsave(&qos_free_list
.lock
, flags
);
66 if (qos_free_list
.cnt
< MAX_FREE_LIST_CNT
) {
67 list_add(&qentry
->list
, &qos_free_list
.head
);
69 spin_unlock_irqrestore(&qos_free_list
.lock
, flags
);
72 spin_unlock_irqrestore(&qos_free_list
.lock
, flags
);
77 static void free_qos_entry_list(struct list_head
*free_list
)
79 struct qos_entry_s
*entry
, *n
;
82 list_for_each_entry_safe(entry
, n
, free_list
, list
) {
83 list_del(&entry
->list
);
88 pr_debug("%s: total_free_cnt=%d\n", __func__
, total_free
);
91 void gdm_qos_init(void *nic_ptr
)
93 struct nic
*nic
= nic_ptr
;
94 struct qos_cb_s
*qcb
= &nic
->qos
;
97 for (i
= 0; i
< QOS_MAX
; i
++) {
98 INIT_LIST_HEAD(&qcb
->qos_list
[i
]);
99 qcb
->csr
[i
].qos_buf_count
= 0;
100 qcb
->csr
[i
].enabled
= false;
103 qcb
->qos_list_cnt
= 0;
104 qcb
->qos_null_idx
= QOS_MAX
-1;
105 qcb
->qos_limit_size
= 255;
107 spin_lock_init(&qcb
->qos_lock
);
109 init_qos_entry_list();
112 void gdm_qos_release_list(void *nic_ptr
)
114 struct nic
*nic
= nic_ptr
;
115 struct qos_cb_s
*qcb
= &nic
->qos
;
117 struct qos_entry_s
*entry
, *n
;
118 struct list_head free_list
;
121 INIT_LIST_HEAD(&free_list
);
123 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
125 for (i
= 0; i
< QOS_MAX
; i
++) {
126 qcb
->csr
[i
].qos_buf_count
= 0;
127 qcb
->csr
[i
].enabled
= false;
130 qcb
->qos_list_cnt
= 0;
131 qcb
->qos_null_idx
= QOS_MAX
-1;
133 for (i
= 0; i
< QOS_MAX
; i
++) {
134 list_for_each_entry_safe(entry
, n
, &qcb
->qos_list
[i
], list
) {
135 list_move_tail(&entry
->list
, &free_list
);
138 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
139 free_qos_entry_list(&free_list
);
142 static int chk_ipv4_rule(struct gdm_wimax_csr_s
*csr
, u8
*stream
, u8
*port
)
146 if (csr
->classifier_rule_en
&IPTYPEOFSERVICE
) {
147 if (((stream
[1] & csr
->ip2s_mask
) < csr
->ip2s_lo
) ||
148 ((stream
[1] & csr
->ip2s_mask
) > csr
->ip2s_hi
))
152 if (csr
->classifier_rule_en
&PROTOCOL
) {
153 if (stream
[9] != csr
->protocol
)
157 if (csr
->classifier_rule_en
&IPMASKEDSRCADDRESS
) {
158 for (i
= 0; i
< 4; i
++) {
159 if ((stream
[12 + i
] & csr
->ipsrc_addrmask
[i
]) !=
160 (csr
->ipsrc_addr
[i
] & csr
->ipsrc_addrmask
[i
]))
165 if (csr
->classifier_rule_en
&IPMASKEDDSTADDRESS
) {
166 for (i
= 0; i
< 4; i
++) {
167 if ((stream
[16 + i
] & csr
->ipdst_addrmask
[i
]) !=
168 (csr
->ipdst_addr
[i
] & csr
->ipdst_addrmask
[i
]))
173 if (csr
->classifier_rule_en
&PROTOCOLSRCPORTRANGE
) {
174 i
= ((port
[0]<<8)&0xff00)+port
[1];
175 if ((i
< csr
->srcport_lo
) || (i
> csr
->srcport_hi
))
179 if (csr
->classifier_rule_en
&PROTOCOLDSTPORTRANGE
) {
180 i
= ((port
[2]<<8)&0xff00)+port
[3];
181 if ((i
< csr
->dstport_lo
) || (i
> csr
->dstport_hi
))
188 static int get_qos_index(struct nic
*nic
, u8
*iph
, u8
*tcpudph
)
191 struct qos_cb_s
*qcb
= &nic
->qos
;
193 if (iph
== NULL
|| tcpudph
== NULL
)
196 ip_ver
= (iph
[0]>>4)&0xf;
201 for (i
= 0; i
< QOS_MAX
; i
++) {
202 if (!qcb
->csr
[i
].enabled
)
204 if (!qcb
->csr
[i
].classifier_rule_en
)
206 if (chk_ipv4_rule(&qcb
->csr
[i
], iph
, tcpudph
) == 0)
213 static void extract_qos_list(struct nic
*nic
, struct list_head
*head
)
215 struct qos_cb_s
*qcb
= &nic
->qos
;
216 struct qos_entry_s
*entry
;
219 INIT_LIST_HEAD(head
);
221 for (i
= 0; i
< QOS_MAX
; i
++) {
222 if (!qcb
->csr
[i
].enabled
)
224 if (qcb
->csr
[i
].qos_buf_count
>= qcb
->qos_limit_size
)
226 if (list_empty(&qcb
->qos_list
[i
]))
229 entry
= list_entry(qcb
->qos_list
[i
].prev
, struct qos_entry_s
,
232 list_move_tail(&entry
->list
, head
);
233 qcb
->csr
[i
].qos_buf_count
++;
235 if (!list_empty(&qcb
->qos_list
[i
]))
236 netdev_warn(nic
->netdev
, "Index(%d) is piled!!\n", i
);
240 static void send_qos_list(struct nic
*nic
, struct list_head
*head
)
242 struct qos_entry_s
*entry
, *n
;
244 list_for_each_entry_safe(entry
, n
, head
, list
) {
245 list_del(&entry
->list
);
246 gdm_wimax_send_tx(entry
->skb
, entry
->dev
);
247 free_qos_entry(entry
);
251 int gdm_qos_send_hci_pkt(struct sk_buff
*skb
, struct net_device
*dev
)
253 struct nic
*nic
= netdev_priv(dev
);
255 struct qos_cb_s
*qcb
= &nic
->qos
;
257 struct ethhdr
*ethh
= (struct ethhdr
*)(skb
->data
+ HCI_HEADER_SIZE
);
258 struct iphdr
*iph
= (struct iphdr
*)((char *)ethh
+ ETH_HLEN
);
260 struct qos_entry_s
*entry
= NULL
;
261 struct list_head send_list
;
264 tcph
= (struct tcphdr
*)iph
+ iph
->ihl
*4;
266 if (ethh
->h_proto
== cpu_to_be16(ETH_P_IP
)) {
267 if (qcb
->qos_list_cnt
&& !qos_free_list
.cnt
) {
268 entry
= alloc_qos_entry();
271 netdev_dbg(dev
, "qcb->qos_list_cnt=%d\n",
275 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
276 if (qcb
->qos_list_cnt
) {
277 index
= get_qos_index(nic
, (u8
*)iph
, (u8
*)tcph
);
279 index
= qcb
->qos_null_idx
;
282 entry
= alloc_qos_entry();
287 list_add_tail(&entry
->list
, &qcb
->qos_list
[index
]);
288 extract_qos_list(nic
, &send_list
);
289 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
290 send_qos_list(nic
, &send_list
);
293 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
295 free_qos_entry(entry
);
298 ret
= gdm_wimax_send_tx(skb
, dev
);
303 static int get_csr(struct qos_cb_s
*qcb
, u32 sfid
, int mode
)
307 for (i
= 0; i
< qcb
->qos_list_cnt
; i
++) {
308 if (qcb
->csr
[i
].sfid
== sfid
)
313 for (i
= 0; i
< QOS_MAX
; i
++) {
314 if (!qcb
->csr
[i
].enabled
) {
315 qcb
->csr
[i
].enabled
= true;
/*
 * HCI QoS sub-command/event codes carried in buf[4] of a QoS HCI packet.
 * QOS_ADD was referenced below but its define was missing; restored here.
 */
#define QOS_CHANGE_DEL	0xFC
#define QOS_ADD		0xFD
#define QOS_REPORT	0xFE
328 void gdm_recv_qos_hci_packet(void *nic_ptr
, u8
*buf
, int size
)
330 struct nic
*nic
= nic_ptr
;
334 struct qos_cb_s
*qcb
= &nic
->qos
;
335 struct qos_entry_s
*entry
, *n
;
336 struct list_head send_list
;
337 struct list_head free_list
;
340 sub_cmd_evt
= (u8
)buf
[4];
342 if (sub_cmd_evt
== QOS_REPORT
) {
343 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
344 for (i
= 0; i
< qcb
->qos_list_cnt
; i
++) {
345 sfid
= ((buf
[(i
*5)+6]<<24)&0xff000000);
346 sfid
+= ((buf
[(i
*5)+7]<<16)&0xff0000);
347 sfid
+= ((buf
[(i
*5)+8]<<8)&0xff00);
348 sfid
+= (buf
[(i
*5)+9]);
349 index
= get_csr(qcb
, sfid
, 0);
351 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
352 netdev_err(nic
->netdev
, "QoS ERROR: No SF\n");
355 qcb
->csr
[index
].qos_buf_count
= buf
[(i
*5)+10];
358 extract_qos_list(nic
, &send_list
);
359 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
360 send_qos_list(nic
, &send_list
);
364 /* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANG_DEL */
366 sfid
= ((buf
[pos
++]<<24)&0xff000000);
367 sfid
+= ((buf
[pos
++]<<16)&0xff0000);
368 sfid
+= ((buf
[pos
++]<<8)&0xff00);
369 sfid
+= (buf
[pos
++]);
371 index
= get_csr(qcb
, sfid
, 1);
373 netdev_err(nic
->netdev
,
374 "QoS ERROR: csr Update Error / Wrong index (%d)\n",
379 if (sub_cmd_evt
== QOS_ADD
) {
380 netdev_dbg(nic
->netdev
, "QOS_ADD SFID = 0x%x, index=%d\n",
383 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
384 qcb
->csr
[index
].sfid
= sfid
;
385 qcb
->csr
[index
].classifier_rule_en
= ((buf
[pos
++]<<8)&0xff00);
386 qcb
->csr
[index
].classifier_rule_en
+= buf
[pos
++];
387 if (qcb
->csr
[index
].classifier_rule_en
== 0)
388 qcb
->qos_null_idx
= index
;
389 qcb
->csr
[index
].ip2s_mask
= buf
[pos
++];
390 qcb
->csr
[index
].ip2s_lo
= buf
[pos
++];
391 qcb
->csr
[index
].ip2s_hi
= buf
[pos
++];
392 qcb
->csr
[index
].protocol
= buf
[pos
++];
393 qcb
->csr
[index
].ipsrc_addrmask
[0] = buf
[pos
++];
394 qcb
->csr
[index
].ipsrc_addrmask
[1] = buf
[pos
++];
395 qcb
->csr
[index
].ipsrc_addrmask
[2] = buf
[pos
++];
396 qcb
->csr
[index
].ipsrc_addrmask
[3] = buf
[pos
++];
397 qcb
->csr
[index
].ipsrc_addr
[0] = buf
[pos
++];
398 qcb
->csr
[index
].ipsrc_addr
[1] = buf
[pos
++];
399 qcb
->csr
[index
].ipsrc_addr
[2] = buf
[pos
++];
400 qcb
->csr
[index
].ipsrc_addr
[3] = buf
[pos
++];
401 qcb
->csr
[index
].ipdst_addrmask
[0] = buf
[pos
++];
402 qcb
->csr
[index
].ipdst_addrmask
[1] = buf
[pos
++];
403 qcb
->csr
[index
].ipdst_addrmask
[2] = buf
[pos
++];
404 qcb
->csr
[index
].ipdst_addrmask
[3] = buf
[pos
++];
405 qcb
->csr
[index
].ipdst_addr
[0] = buf
[pos
++];
406 qcb
->csr
[index
].ipdst_addr
[1] = buf
[pos
++];
407 qcb
->csr
[index
].ipdst_addr
[2] = buf
[pos
++];
408 qcb
->csr
[index
].ipdst_addr
[3] = buf
[pos
++];
409 qcb
->csr
[index
].srcport_lo
= ((buf
[pos
++]<<8)&0xff00);
410 qcb
->csr
[index
].srcport_lo
+= buf
[pos
++];
411 qcb
->csr
[index
].srcport_hi
= ((buf
[pos
++]<<8)&0xff00);
412 qcb
->csr
[index
].srcport_hi
+= buf
[pos
++];
413 qcb
->csr
[index
].dstport_lo
= ((buf
[pos
++]<<8)&0xff00);
414 qcb
->csr
[index
].dstport_lo
+= buf
[pos
++];
415 qcb
->csr
[index
].dstport_hi
= ((buf
[pos
++]<<8)&0xff00);
416 qcb
->csr
[index
].dstport_hi
+= buf
[pos
++];
418 qcb
->qos_limit_size
= 254/qcb
->qos_list_cnt
;
419 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
420 } else if (sub_cmd_evt
== QOS_CHANGE_DEL
) {
421 netdev_dbg(nic
->netdev
, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
424 INIT_LIST_HEAD(&free_list
);
426 spin_lock_irqsave(&qcb
->qos_lock
, flags
);
427 qcb
->csr
[index
].enabled
= false;
429 qcb
->qos_limit_size
= 254/qcb
->qos_list_cnt
;
431 list_for_each_entry_safe(entry
, n
, &qcb
->qos_list
[index
],
433 list_move_tail(&entry
->list
, &free_list
);
435 spin_unlock_irqrestore(&qcb
->qos_lock
, flags
);
436 free_qos_entry_list(&free_list
);