/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>

#include "qlcnic.h"

#define TX_ETHER_PKT		0x01
#define TX_TCP_PKT		0x02
#define TX_UDP_PKT		0x03
#define TX_IP_PKT		0x04
#define TX_TCP_LSO		0x05
#define TX_TCP_LSO6		0x06
#define TX_TCPV6_PKT		0x0b
#define TX_UDPV6_PKT		0x0c
#define FLAGS_VLAN_TAGGED	0x10
#define FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor bit layout:
 *  0-3  port,             4-7   status,   8-11  type, 12-27 total_length,
 * 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset,
 * 53-55 desc_cnt,         56-57 owner,    58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

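/* Worked example (illustrative values, not captured from hardware): a
 * status word of 0x11200000705DC020ULL decodes with the macros above to
 * opcode 0x04 (QLCNIC_RXPKT_DESC), owner 0x1 (STATUS_OWNER_HOST),
 * desc_cnt 1, reference_handle 0x0007, total_length 1500 (0x5DC) and
 * status 2 (STATUS_CKSUM_OK).
 */
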
#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts)	((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TX_POLL_BUDGET		128
#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE	(QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2

#define qlcnic_83xx_pktln(sts)		((sts >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		((sts >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	((sts >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		((sts >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)

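/* Note (inferred from the call sites later in this file): 83xx status
 * descriptors span two 64-bit words. pktln/hndl/lro_pktln are applied to
 * sts_data[0], while csum_status, opcode, l2/l4_hdr_off and the
 * tstamp/psh/loopback flag bits are applied to sts_data[1].
 */
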
struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
				     struct qlcnic_host_rds_ring *, u16, u16);

inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
				       struct qlcnic_host_tx_ring *tx_ring)
{
	writel(0, tx_ring->crb_intr_mask);
}

inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
					struct qlcnic_host_tx_ring *tx_ring)
{
	writel(1, tx_ring->crb_intr_mask);
}

static inline u8 qlcnic_mac_hash(u64 mac)
{
	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
}

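/* Illustrative example (assumes the little-endian layout produced when the
 * callers below memcpy a 6-byte MAC into a u64): for source MAC
 * 00:11:22:33:44:55, (mac & 0xff) is the first octet 0x00 and
 * ((mac >> 40) & 0xff) is the last octet 0x55, so the hash is
 * 0x00 ^ 0x55 = 0x55; callers then mask with (fbucket_size - 1) to pick a
 * bucket.
 */
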
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
{
	if (qlcnic_83xx_check(adapter))
		return handle | (ring_id << 15);
	else
		return handle;
}

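/* On 83xx the receive reference handle is effectively 15 bits wide, so the
 * RDS ring id is folded into bit 15 of the handle posted to firmware. Per
 * the macros above, the completion then carries the buffer handle in bits
 * 48-62 of sts_data[0] (qlcnic_83xx_hndl() masks 0x7FFF) while the ring id
 * lands in bit 63, which QLCNIC_FETCH_RING_ID() recovers.
 */
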
static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
				      struct qlcnic_filter *fil,
				      void *addr, u16 vlan_id)
{
	int ret;
	u8 op;

	op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (ret)
		return;

	op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (!ret) {
		hlist_del(&fil->fnode);
		adapter->rx_fhash.fnum--;
	}
}

static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
						    void *addr, u16 vlan_id)
{
	struct qlcnic_filter *tmp_fil = NULL;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id)
			return tmp_fil;
	}

	return NULL;
}

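/* Two software tables back the MAC-learning logic below: adapter->fhash
 * mirrors source addresses that have been programmed into the hardware
 * filter from transmitted frames, while adapter->rx_fhash tracks source
 * addresses seen in loopback (eswitch) traffic. When an address changes
 * direction, qlcnic_add_lb_filter() retires the stale entry from the
 * other table.
 */
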
void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
			  int loopback_pkt, u16 vlan_id)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	unsigned long time;
	u64 src_addr = 0;
	u8 hindex, op;
	int ret;

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr) &
		 (adapter->fhash.fbucket_size - 1);

	if (loopback_pkt) {
		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
			return;

		head = &(adapter->rx_fhash.fhead[hindex]);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			time = tmp_fil->ftime;
			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
				tmp_fil->ftime = jiffies;
			return;
		}

		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
		if (!fil)
			return;

		fil->ftime = jiffies;
		memcpy(fil->faddr, &src_addr, ETH_ALEN);
		fil->vlan_id = vlan_id;
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_add_head(&(fil->fnode), head);
		adapter->rx_fhash.fnum++;
		spin_unlock(&adapter->rx_mac_learn_lock);
	} else {
		head = &adapter->fhash.fhead[hindex];

		spin_lock(&adapter->mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
			ret = qlcnic_sre_macaddr_change(adapter,
							(u8 *)&src_addr,
							vlan_id, op);
			if (!ret) {
				hlist_del(&tmp_fil->fnode);
				adapter->fhash.fnum--;
			}

			spin_unlock(&adapter->mac_learn_lock);

			return;
		}

		spin_unlock(&adapter->mac_learn_lock);

		head = &adapter->rx_fhash.fhead[hindex];

		spin_lock(&adapter->rx_mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil)
			qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
						  vlan_id);

		spin_unlock(&adapter->rx_mac_learn_lock);
	}
}

void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
			       u16 vlan_id)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = cpu_to_le16(vlan_id);

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}

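/* Design note: on 82xx the MAC-learn request above is not a control-path
 * mailbox command; it is written as a special request descriptor directly
 * on the TX ring, in program order with outgoing frames, and is consumed
 * by firmware once the doorbell is written later in the transmit path.
 */
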
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *n;
	struct hlist_head *head;
	struct net_device *netdev = adapter->netdev;
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	u64 src_addr = 0;
	u16 vlan_id = 0;
	u8 hindex;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax) {
		adapter->stats.mac_filter_limit_overrun++;
		netdev_info(netdev, "Cannot add more than %d MAC addresses\n",
			    adapter->fhash.fmax);
		return;
	}

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
						     vlan_id);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, &src_addr, vlan_id);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		flags = FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	}
	if (unlikely(adapter->tx_pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = FLAGS_VLAN_OOB;
		vlan_tci = adapter->tx_pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = TX_ETHER_PKT;
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring
		 */
		copied = 0;
		offset = 2;

		if (flags & FLAGS_VLAN_OOB) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);
			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

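/* Worked example for the LSO header copy above (illustrative sizes): each
 * cmd_desc_type0 is 64 bytes. For a plain TCP/IPv4 TSO frame with no
 * options, hdr_len = 14 (MAC) + 20 (IP) + 20 (TCP) = 54 bytes, which fits
 * in the 62 bytes available after the initial 2-byte offset, so a single
 * extra descriptor is consumed for the header template; with large TCP/IP
 * options hdr_len can exceed 62 and the copy loop spills into further
 * descriptors.
 */
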
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

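/* qlcnic_clear_cmddesc() below zeroes only quadwords 0, 2 and 7 of the
 * 64-byte command descriptor; per the cmd_desc_type0 layout in qlcnic.h
 * these carry the flags/opcode, header offsets, frag count/length, MSS,
 * port/context and VLAN metadata. The remaining quadwords hold per-buffer
 * addresses and lengths, which presumably need no clearing because
 * firmware only consumes as many entries as the fragment count announces.
 */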
static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}

netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;

	num_txd = tx_ring->num_desc;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	frag_count = skb_shinfo(skb)->nr_frags + 1;
	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_start_queue(netdev);
		} else {
			adapter->stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
		goto unwind_buff;

	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

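/* Design note on the availability check in qlcnic_xmit_frame() above: the
 * queue is stopped first and qlcnic_tx_avail() re-tested afterwards, so a
 * completion that frees descriptors between the two reads either leaves
 * enough room (and the queue restarts immediately) or the TX completion
 * path in qlcnic_process_cmd_ring() wakes the queue once availability
 * rises above TX_STOP_THRESH again.
 */
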
void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else if (!adapter->ahw->linkup && linkup) {
		netdev_info(netdev, "NIC Link is up\n");
		adapter->ahw->linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
}

static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data,
			     rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
					u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}
		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}
	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}

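/* Note on the doorbell write above: the producer register is written as
 * (producer - 1) & (num_desc - 1), i.e. the index of the last descriptor
 * actually posted rather than the next free slot; the same convention is
 * used by qlcnic_post_rx_buffers() later in this file.
 */
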
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
				   int budget)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}
			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;
		smp_mb();
		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock(&adapter->tx_clean_lock);

	return done;
}

static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
					      budget);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
			     lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}

static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -EINPROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -ENODEV;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -EIO;
			break;
		}
		break;
	default:
		break;
	}
}

struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
				     struct qlcnic_host_rds_ring *ring,
				     u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &ring->rx_buf_arr[index];
	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}

static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->rx_pvid)
		return 0;

	if (*vlan_tag == adapter->rx_pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}

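/* Convention used by the receive paths below: a vlan_tag of 0xffff is the
 * "no VLAN to report" sentinel. It is the callers' initial value and is
 * also set above when the tag matches the port's rx_pvid, so only tags
 * different from 0xffff are handed to __vlan_hwaccel_put_tag().
 */
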
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)

static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring > adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index > rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}

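/* The header fixup above rewrites the aggregated frame so the stack sees
 * one consistent TCP segment: tot_len/payload_len are recomputed from the
 * LRO payload length, and for IPv4 csum_replace2() patches the header
 * checksum incrementally rather than recomputing it from scratch.
 */
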
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	int opcode, desc_cnt, count = 0;
	u64 sts_data0, sts_data1;
	u8 ring;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);
		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		default:
			goto skip;
		}
		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer, handle;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
					       ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}

static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *data = skb->data;

	pr_info("\n");
	for (i = 0; i < skb->len; i++) {
		QLCDB(adapter, DRV, "%02x ", data[i]);
		if ((i & 0x0f) == 8)
			pr_info("\n");
	}
}

static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
}

void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}

int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
			 struct net_device *netdev)
{
	int ring, max_sds_rings;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	max_sds_rings = adapter->max_sds_rings;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       QLCNIC_NETDEV_WEIGHT * 2);
	}

	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
		qlcnic_free_sds_rings(recv_ctx);
		return -ENOMEM;
	}

	return 0;
}

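/* Note on the handler split above: only the last SDS ring is registered
 * with qlcnic_poll(), which also reaps TX completions; the remaining
 * rings use qlcnic_rx_poll() and process receive work only.
 */
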
void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
	qlcnic_free_tx_rings(adapter);
}

void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)

static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
	if (lro_pkt)
		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
	else
		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
			struct qlcnic_host_sds_ring *sds_ring,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_83xx_pktln(sts_data[0]);
	cksum = qlcnic_83xx_csum_status(sts_data[1]);
	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
			u8 ring, u64 sts_data[])
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push;
	int l2_hdr_offset, l4_hdr_offset;
	int index, is_lb_pkt;
	u16 lro_length, length, data_offset, gso_size;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring > adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_83xx_hndl(sts_data[0]);
	if (unlikely(index > rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
	push = qlcnic_83xx_is_psh_bit(sts_data[1]);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->drv_mac_learn &&
	    (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		t_vid = 0;
		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}
	if (qlcnic_83xx_is_tstamp(sts_data[1]))
		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));

		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
		skb_shinfo(skb)->gso_size = gso_size;
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;
	return buffer;
}

static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
					int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf = NULL;
	u8 ring;
	u64 sts_data[2];
	int count = 0, opcode;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
		opcode = qlcnic_83xx_opcode(sts_data[1]);
		if (!opcode)
			break;
		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);

		switch (opcode) {
		case QLC_83XX_REG_DESC:
			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
							ring, sts_data);
			break;
		case QLC_83XX_LRO_DESC:
			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
							sts_data);
			break;
		default:
			dev_info(&adapter->pdev->dev,
				 "Unknown opcode: 0x%x\n", opcode);
			goto skip;
		}

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		desc = &sds_ring->desc_head[consumer];
		/* Reset the descriptor */
		desc->status_desc_data[1] = 0;
		consumer = get_next_index(consumer, sds_ring->num_desc);
		count++;
	}
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}
		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}
	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}
	return count;
}

static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
{
	int tx_complete;
	int work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	/* tx ring count = 1 */
	tx_ring = adapter->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		qlcnic_83xx_enable_intr(adapter, sds_ring);
	}

	return work_done;
}

4be41e92
SC
1745static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
1746{
1747 int tx_complete;
1748 int work_done;
1749 struct qlcnic_host_sds_ring *sds_ring;
1750 struct qlcnic_adapter *adapter;
1751 struct qlcnic_host_tx_ring *tx_ring;
1752
1753 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1754 adapter = sds_ring->adapter;
1755 /* tx ring count = 1 */
1756 tx_ring = adapter->tx_ring;
1757
4be41e92
SC
1758 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1759 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1760 if ((work_done < budget) && tx_complete) {
1761 napi_complete(&sds_ring->napi);
ac166700 1762 qlcnic_83xx_enable_intr(adapter, sds_ring);
1763 }
1764
1765 return work_done;
1766}
1767
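/*
 * TX-only poll handler for dedicated TX MSI-X vectors.  Note that
 * the incoming budget is replaced by the fixed QLCNIC_TX_POLL_BUDGET,
 * and the vector is re-armed as soon as any completions were reaped
 * while the device is still up.
 */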
1768static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
1769{
1770 int work_done;
1771 struct qlcnic_host_tx_ring *tx_ring;
1772 struct qlcnic_adapter *adapter;
1773
1774 budget = QLCNIC_TX_POLL_BUDGET;
1775 tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
1776 adapter = tx_ring->adapter;
1777 work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1778 if (work_done) {
1779 napi_complete(&tx_ring->napi);
1780		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1781 qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
1782 }
1783
1784 return work_done;
1785}
1786
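/*
 * RX-only poll handler, used together with qlcnic_83xx_msix_tx_poll
 * when TX rings own their own MSI-X vectors.
 */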
1787static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
1788{
1789 int work_done;
1790 struct qlcnic_host_sds_ring *sds_ring;
1791 struct qlcnic_adapter *adapter;
1792
1793 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1794 adapter = sds_ring->adapter;
1795 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1796 if (work_done < budget) {
1797 napi_complete(&sds_ring->napi);
1798 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1799 qlcnic_83xx_enable_intr(adapter, sds_ring);
1800 }
1801
1802 return work_done;
1803}
1804
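/*
 * Enable NAPI and unmask interrupts on all SDS rings.  TX rings get
 * their own NAPI context and interrupt only when MSI-X is enabled
 * and TX interrupts are not shared with RX.
 */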
1805void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
1806{
1807 int ring;
1808 struct qlcnic_host_sds_ring *sds_ring;
1809 struct qlcnic_host_tx_ring *tx_ring;
1810 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1811
1812 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1813 return;
1814
1815 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1816 sds_ring = &recv_ctx->sds_rings[ring];
1817 napi_enable(&sds_ring->napi);
1818 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1819 qlcnic_83xx_enable_intr(adapter, sds_ring);
1820 }
1821
1822 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1823 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1824 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1825 tx_ring = &adapter->tx_ring[ring];
1826 napi_enable(&tx_ring->napi);
1827 qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
1828 }
1829 }
1830}
1831
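/*
 * Mirror of qlcnic_83xx_napi_enable(): mask interrupts first, then
 * wait for in-flight polls to finish before disabling NAPI.
 */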
1832void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
1833{
1834 int ring;
1835 struct qlcnic_host_sds_ring *sds_ring;
1836 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1837 struct qlcnic_host_tx_ring *tx_ring;
1838
1839 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1840 return;
1841
1842 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1843 sds_ring = &recv_ctx->sds_rings[ring];
1844 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1845 qlcnic_83xx_disable_intr(adapter, sds_ring);
1846 napi_synchronize(&sds_ring->napi);
1847 napi_disable(&sds_ring->napi);
1848 }
1849
1850 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1851 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1852 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1853 tx_ring = &adapter->tx_ring[ring];
1854 qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
1855 napi_synchronize(&tx_ring->napi);
1856 napi_disable(&tx_ring->napi);
1857 }
1858 }
1859}
1860
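/*
 * Allocate SDS/TX ring state and register one NAPI context per ring.
 * The poll callback and weight depend on the interrupt mode: RX-only
 * vectors, a shared RX/TX vector (SR-IOV VF), or a single handler
 * for legacy/MSI interrupts.
 */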
1861int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1862 struct net_device *netdev)
1863{
1864	int ring, max_sds_rings, temp;
1865 struct qlcnic_host_sds_ring *sds_ring;
1866 struct qlcnic_host_tx_ring *tx_ring;
1867 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1868
1869 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
1870 return -ENOMEM;
1871
1872 max_sds_rings = adapter->max_sds_rings;
1873 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1874 sds_ring = &recv_ctx->sds_rings[ring];
1875 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1876 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1877 netif_napi_add(netdev, &sds_ring->napi,
1878 qlcnic_83xx_rx_poll,
1879 QLCNIC_NETDEV_WEIGHT * 2);
1880 } else {
1881 temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
1882 netif_napi_add(netdev, &sds_ring->napi,
1883 qlcnic_83xx_msix_sriov_vf_poll,
1884 temp);
1885 }
1886
1887 } else {
1888 netif_napi_add(netdev, &sds_ring->napi,
1889 qlcnic_83xx_poll,
1890 QLCNIC_NETDEV_WEIGHT / max_sds_rings);
1891		}
1892 }
1893
1894 if (qlcnic_alloc_tx_rings(adapter, netdev)) {
1895 qlcnic_free_sds_rings(recv_ctx);
1896 return -ENOMEM;
1897 }
1898
1899 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1900 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1901 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1902 tx_ring = &adapter->tx_ring[ring];
1903 netif_napi_add(netdev, &tx_ring->napi,
1904 qlcnic_83xx_msix_tx_poll,
1905 QLCNIC_NETDEV_WEIGHT);
1906 }
1907 }
1908
1909 return 0;
1910}
1911
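/* Tear-down counterpart of qlcnic_83xx_napi_add(). */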
1912void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
1913{
1914 int ring;
1915 struct qlcnic_host_sds_ring *sds_ring;
1916 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1917 struct qlcnic_host_tx_ring *tx_ring;
1918
1919 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1920 sds_ring = &recv_ctx->sds_rings[ring];
1921 netif_napi_del(&sds_ring->napi);
1922 }
1923
1924 qlcnic_free_sds_rings(adapter->recv_ctx);
1925
1926 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1927 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1928 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1929 tx_ring = &adapter->tx_ring[ring];
1930 netif_napi_del(&tx_ring->napi);
1931 }
1932 }
1933
1934 qlcnic_free_tx_rings(adapter);
1935}
1936
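/*
 * Diagnostic receive path used by loopback tests: pull one frame off
 * the RDS ring, bump diag_cnt if it carries the expected loopback
 * pattern, otherwise dump it for inspection.
 */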
1937void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
1938 int ring, u64 sts_data[])
1939{
1940 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1941 struct sk_buff *skb;
1942 struct qlcnic_host_rds_ring *rds_ring;
1943 int index, length;
1944
1945 if (unlikely(ring >= adapter->max_rds_rings))
1946 return;
1947
1948 rds_ring = &recv_ctx->rds_rings[ring];
1949 index = qlcnic_83xx_hndl(sts_data[0]);
1950 if (unlikely(index >= rds_ring->num_desc))
1951 return;
1952
1953 length = qlcnic_83xx_pktln(sts_data[0]);
1954
1955 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1956 if (!skb)
1957 return;
1958
1959 if (length > rds_ring->skb_size)
1960 skb_put(skb, rds_ring->skb_size);
1961 else
1962 skb_put(skb, length);
1963
1964 if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
1965 adapter->ahw->diag_cnt++;
1966 else
1967 dump_skb(skb, adapter);
1968
1969 dev_kfree_skb_any(skb);
1970 return;
1971}
1972
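/*
 * Diagnostic variant of the status-ring handler: consume a single
 * status descriptor, return it to firmware ownership
 * (STATUS_OWNER_PHANTOM) and advance the consumer index.
 */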
1973void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1974{
1975 struct qlcnic_adapter *adapter = sds_ring->adapter;
1976 struct status_desc *desc;
1977 u64 sts_data[2];
1978 int ring, opcode;
1979 u32 consumer = sds_ring->consumer;
1980
1981 desc = &sds_ring->desc_head[consumer];
1982 sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
1983 sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
1984 opcode = qlcnic_83xx_opcode(sts_data[1]);
1985 if (!opcode)
1986 return;
1987
1988 ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
1989 qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
1990 desc = &sds_ring->desc_head[consumer];
1991 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1992 consumer = get_next_index(consumer, sds_ring->num_desc);
1993 sds_ring->consumer = consumer;
1994 writel(consumer, sds_ring->crb_sts_consumer);
1995}