check_sum:
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
- cso = skb_transport_offset(skb);
+ cso = skb_checksum_start_offset(skb);
if (unlikely(cso & 0x1)) {
if (netif_msg_tx_err(adapter))
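This hunk, like every hunk below, stops deriving the checksum start from the transport header and uses the offset the stack actually requested. For reference, the two helpers read roughly as follows (paraphrased from include/linux/skbuff.h of this period; verify against the tree the patch is applied to):

        static inline int skb_transport_offset(const struct sk_buff *skb)
        {
                return skb_transport_header(skb) - skb->data;
        }

        static inline int skb_checksum_start_offset(const struct sk_buff *skb)
        {
                return skb->csum_start - skb_headroom(skb);
        }

For locally generated TCP/UDP packets the two values coincide, but CHECKSUM_PARTIAL only guarantees that skb->csum_start marks where summing must begin, so drivers advertising hardware checksumming should honour csum_start rather than assume it equals the transport header offset.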
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
- cso = skb_transport_offset(skb);
+ cso = skb_checksum_start_offset(skb);
if (unlikely(cso & 0x1)) {
netdev_err(adapter->netdev,
"payload offset should not ant event number\n");
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const u64 csum_start_off = skb_transport_offset(skb);
+ const u64 csum_start_off = skb_checksum_start_offset(skb);
const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = TX_DESC_CSUM_EN |
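csum_stuff_off is where the device writes the folded checksum. A worked example, assuming a plain Ethernet + IPv4 + TCP frame with no VLAN tag and no IP options (numbers are illustrative only):

        /* csum_start_off = 14 (Ethernet) + 20 (IPv4) = 34
         * csum_stuff_off = 34 + offsetof(struct tcphdr, check) = 34 + 16 = 50
         * i.e. summing starts at the TCP header and the result is written
         * into the TCP checksum field 16 bytes into it.
         */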
break;
}
- css = skb_transport_offset(skb);
+ css = skb_checksum_start_offset(skb);
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
break;
}
- css = skb_transport_offset(skb);
+ css = skb_checksum_start_offset(skb);
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
{
unsigned int head_len = skb_headlen(skb);
unsigned int len_left = skb->len - head_len;
- unsigned int hdr_len = skb_transport_offset(skb);
+ unsigned int hdr_len = skb_checksum_start_offset(skb);
unsigned int csum_offset = hdr_len + skb->csum_offset;
int eop = (len_left == 0);
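Here hdr_len is the checksum start and csum_offset the absolute position of the 16-bit field the adapter must fill in. A software rendering of the same operation in terms of the hunk's own variables (essentially what skb_checksum_help() does when offload is not available; it assumes the checksum field sits in the linear header area):

        __wsum csum = skb_checksum(skb, hdr_len, skb->len - hdr_len, 0);
        *(__sum16 *)(skb->data + csum_offset) = csum_fold(csum);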
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
struct ixgb_buffer *buffer_info;
- css = skb_transport_offset(skb);
+ css = skb_checksum_start_offset(skb);
cso = css + skb->csum_offset;
i = adapter->tx_ring.next_to_use;
cur_p->app0 = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- unsigned int csum_start_off = skb_transport_offset(skb);
+ unsigned int csum_start_off = skb_checksum_start_offset(skb);
unsigned int csum_index_off = csum_start_off + skb->csum_offset;
cur_p->app0 |= 1; /* TX Checksum Enabled */
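In the ll_temac descriptor format the two offsets end up packed into one application word. From memory, the lines immediately following look roughly like this (a sketch, not a quote):

        cur_p->app1 = (csum_start_off << 16) | csum_index_off;
        cur_p->app2 = 0;        /* initial checksum seed */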
odd_flag = 0;
flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- cksum_offset = skb_transport_offset(skb);
+ cksum_offset = skb_checksum_start_offset(skb);
pseudo_hdr_offset = cksum_offset + skb->csum_offset;
/* If the headers are excessively large, then we must
* fall back to a software checksum */
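The fallback the comment refers to is the usual pattern built on skb_checksum_help(). A sketch with purely illustrative limits (the real bounds are firmware specific, and the error label is hypothetical):

        if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
                if (skb_checksum_help(skb))
                        goto drop;      /* hypothetical error label */
                cksum_offset = 0;
                pseudo_hdr_offset = 0;
        }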
(ip_proto == IPPROTO_UDP ?
TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
- start = skb_transport_offset(skb) -
+ start = skb_checksum_start_offset(skb) -
(pad_bytes + sizeof(struct tx_pkt_hdr));
stuff = start + skb->csum_offset;
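The two lines at the top of this hunk are the tail of a protocol dispatch whose TCP case sits just above the hunk window; from memory the full expression reads roughly (a sketch):

        csum_bits = (ip_proto == IPPROTO_TCP ? TXHDR_CSUM_TCP :
                     (ip_proto == IPPROTO_UDP ? TXHDR_CSUM_UDP :
                      TXHDR_CSUM_SCTP));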
td->dma_hi = map >> 32;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const int offset = skb_transport_offset(skb);
+ const int offset = skb_checksum_start_offset(skb);
/* This seems backwards, but it is what the sk98lin
* does. Looks like hardware is wrong? */
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const u64 csum_start_off = skb_transport_offset(skb);
+ const u64 csum_start_off = skb_checksum_start_offset(skb);
const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = (TXDCTRL_CENAB |
tx_flags = TXFLAG_OWN;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const u32 csum_start_off = skb_transport_offset(skb);
+ const u32 csum_start_off = skb_checksum_start_offset(skb);
const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
{
struct Vmxnet3_TxDataDesc *tdd;
- if (ctx->mss) {
+ if (ctx->mss) { /* TSO */
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
ctx->l4_hdr_size = ((struct tcphdr *)
skb_transport_header(skb))->doff * 4;
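Note that the TSO branch above deliberately keeps skb_transport_offset(): for segmentation the device needs the true L4 header position and length, while the plain-checksum branch in the next hunk switches to the csum_start based helper. The doff arithmetic is the open-coded form of the tcp_hdrlen() helper, so the line could equivalently read:

        ctx->l4_hdr_size = tcp_hdrlen(skb);     /* == tcp_hdr(skb)->doff * 4 */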
unsigned int pull_size;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- ctx->eth_ip_hdr_size = skb_transport_offset(skb);
+ ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
if (ctx->ipv4) {
struct iphdr *iph = (struct iphdr *)