unsigned int hd_len);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int hd_len, int elements);
+ unsigned int offset, unsigned int hd_len, int elements);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int qeth_core_get_sset_count(struct net_device *, int);
void qeth_core_get_ethtool_stats(struct net_device *,
buffer->element[element].length = hd_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
buf->next_element_to_fill++;
- skb_pull(skb, hd_len);
- }
-
/* IQD */
- if (offset > 0) {
+ } else if (offset) {
int element = buf->next_element_to_fill;
is_first_elem = false;
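The net effect of this hunk is that the header branch no longer strips the transmit header off the skb with skb_pull(); a non-zero hd_len gets its own leading buffer element, and the offset passed in by the caller tells the data-fill code where the payload starts. Below is a minimal, stand-alone mock of that idea; none of these types or names are the driver's own, they only illustrate the technique:

```c
#include <stdio.h>

#define MOCK_MAX_ELEMENTS 4

struct mock_element {			/* stand-in for one qdio buffer element */
	const void *addr;
	unsigned int length;
};

struct mock_buffer {
	struct mock_element element[MOCK_MAX_ELEMENTS];
	int next_element_to_fill;
};

struct mock_skb {			/* stand-in for struct sk_buff */
	const unsigned char *data;
	unsigned int len;
};

/*
 * Sketch of the patched behaviour: a non-zero hd_len becomes a dedicated
 * header element, and the payload element starts at data + offset instead
 * of the skb being mutated via skb_pull().
 */
static void mock_fill_buffer(struct mock_buffer *buf,
			     const struct mock_skb *skb, const void *hdr,
			     unsigned int offset, unsigned int hd_len)
{
	int element = buf->next_element_to_fill;

	if (hd_len) {
		buf->element[element].addr = hdr;
		buf->element[element].length = hd_len;
		element++;
	}

	buf->element[element].addr = skb->data + offset;
	buf->element[element].length = skb->len - offset;
	buf->next_element_to_fill = element + 1;
}

int main(void)
{
	unsigned char pkt[] = "HHHHpayload";	/* 4 header bytes + 7 payload bytes */
	struct mock_skb skb = { .data = pkt, .len = sizeof(pkt) - 1 };
	struct mock_buffer buf = { .next_element_to_fill = 0 };

	/* the TSO call site below passes offset == hd_len */
	mock_fill_buffer(&buf, &skb, pkt, 4, 4);

	for (int i = 0; i < buf.next_element_to_fill; i++)
		printf("element %d: length=%u data=%.*s\n", i,
		       buf.element[i].length, (int)buf.element[i].length,
		       (const char *)buf.element[i].addr);
	return 0;
}
```

Built and run on its own, the mock prints one 4-byte header element and one 7-byte payload element while the mock skb itself is left untouched.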
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int hd_len, int elements_needed)
+ unsigned int offset, unsigned int hd_len,
+ int elements_needed)
{
struct qeth_qdio_out_buffer *buffer;
int start_index;
}
}
}
- tmp = qeth_fill_buffer(queue, buffer, skb, hdr, 0, hd_len);
+ tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
QDIO_MAX_BUFFERS_PER_Q;
flush_count += tmp;
rc = -EINVAL;
goto out;
}
- rc = qeth_do_send_packet(card, queue, skb_copy, hdr, 0, elements);
+ rc = qeth_do_send_packet(card, queue, skb_copy, hdr, 0, 0, elements);
out:
if (!rc) {
/* tx success, free dangling original */
return -E2BIG;
if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
return -EINVAL;
- return qeth_do_send_packet(card, queue, skb, hdr, 0, elements);
+ return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements);
}
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
- elements);
+ hd_len, elements);
} else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
data_offset, 0);
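For reference, the updated calling convention as exercised by the call sites above. Only the qeth_do_send_packet() prototype is taken from the patch; the stub body and main() are hypothetical scaffolding so the snippet compiles and runs on its own:

```c
#include <stdio.h>

struct qeth_card;
struct qeth_qdio_out_q;
struct sk_buff;
struct qeth_hdr;

/* prototype as declared in qeth_core.h after this patch */
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			struct sk_buff *skb, struct qeth_hdr *hdr,
			unsigned int offset, unsigned int hd_len,
			int elements);

/* hypothetical stub; the real definition lives in qeth_core_main.c */
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			struct sk_buff *skb, struct qeth_hdr *hdr,
			unsigned int offset, unsigned int hd_len,
			int elements)
{
	(void)card; (void)queue; (void)skb; (void)hdr; (void)elements;
	printf("send: offset=%u hd_len=%u\n", offset, hd_len);
	return 0;
}

int main(void)
{
	/* hypothetical length of the headers built at the front of the skb */
	unsigned int hd_len = 64;
	int elements = 2;

	/* call sites above that pass 0, 0: no separate header element,
	 * nothing to skip in the skb data */
	qeth_do_send_packet(NULL, NULL, NULL, NULL, 0, 0, elements);

	/* TSO call site above: the hd_len header bytes get their own
	 * element and the payload starts hd_len bytes into the skb,
	 * so offset == hd_len */
	qeth_do_send_packet(NULL, NULL, NULL, NULL, hd_len, hd_len, elements);

	return 0;
}
```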