#define NUM_TX_RING_ENTRIES 256
#define NUM_RX_RING_ENTRIES 256
-#define NUM_SMALL_BUFFERS 512
-#define NUM_LARGE_BUFFERS 512
+/* Use the same length for the small (sbq) and large (lbq) buffer queues.
+ * Note that the device appears to support queues of different lengths.
+ */
+#define QLGE_BQ_SHIFT 9
+#define QLGE_BQ_LEN BIT(QLGE_BQ_SHIFT)
+#define QLGE_BQ_SIZE (QLGE_BQ_LEN * sizeof(__le64))
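+/* With QLGE_BQ_SHIFT == 9, QLGE_BQ_LEN is 512 entries and QLGE_BQ_SIZE is
+ * 512 * sizeof(__le64) == 4096 bytes, i.e. each buffer queue occupies
+ * exactly one DB_PAGE_SIZE page.
+ */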
+
#define DB_PAGE_SIZE 4096
/* Calculate the number of (4k) pages required to
 * contain a buffer queue of the given length.
 */
#define MAX_DB_PAGES_PER_BQ(x) \
	(((x * sizeof(u64)) / DB_PAGE_SIZE) + \
	(((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
+ MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64))
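+/* MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) evaluates to 1, so the shadow area is
+ * three u64s: the completion queue's producer-index shadow plus one
+ * page-list entry for each of the two buffer queues.
+ */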
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048
dma_addr_t base_indirect_dma;
struct qlge_bq_desc *queue;
void __iomem *prod_idx_db_reg;
- u32 len; /* entry count */
- u32 size; /* size in bytes of hw ring */
u32 prod_idx; /* current sw prod idx */
u32 curr_idx; /* next entry we expect */
u32 clean_idx; /* beginning of new descs */
offsetof(struct rx_ring, lbq))); \
})
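+/* QLGE_BQ_LEN is a power of two, so masking with (QLGE_BQ_LEN - 1) is
+ * equivalent to index % QLGE_BQ_LEN, e.g. QLGE_BQ_WRAP(QLGE_BQ_LEN) == 0.
+ */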
+#define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1))
+
struct rx_ring {
struct cqicb cqicb; /* The chip's completion queue init control block. */
{
struct qlge_bq_desc *bq_desc;
- bq_desc = &bq->queue[bq->curr_idx++];
- if (bq->curr_idx == bq->len)
- bq->curr_idx = 0;
+ bq_desc = &bq->queue[bq->curr_idx];
+ bq->curr_idx = QLGE_BQ_WRAP(bq->curr_idx + 1);
bq->free_cnt++;
return bq_desc;
return;
}
- clean_idx++;
- if (clean_idx == bq->len)
- clean_idx = 0;
+ clean_idx = QLGE_BQ_WRAP(clean_idx + 1);
}
bq->clean_idx = clean_idx;
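+	/* Buffers are posted to the device 16 at a time; QLGE_BQ_LEN is a
+	 * multiple of 16, so the wrapped producer index can only land back
+	 * on 0 at the end of the ring, never past it.
+	 */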
- bq->prod_idx += 16;
- if (bq->prod_idx == bq->len)
- bq->prod_idx = 0;
+ bq->prod_idx = QLGE_BQ_WRAP(bq->prod_idx + 16);
bq->free_cnt -= 16;
}
put_page(lbq_desc->p.pg_chunk.page);
lbq_desc->p.pg_chunk.page = NULL;
- if (++curr_idx == rx_ring->lbq.len)
- curr_idx = 0;
+ curr_idx = QLGE_BQ_WRAP(curr_idx + 1);
}
if (rx_ring->master_chunk.page) {
{
int i;
- for (i = 0; i < rx_ring->sbq.len; i++) {
+ for (i = 0; i < QLGE_BQ_LEN; i++) {
struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
if (sbq_desc == NULL) {
__le64 *buf_ptr;
int i;
- bq->base = pci_alloc_consistent(qdev->pdev, bq->size, &bq->base_dma);
+ bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ &bq->base_dma);
if (!bq->base) {
netif_err(qdev, ifup, qdev->ndev,
"ring %u %s allocation failed.\n", rx_ring->cq_id,
return -ENOMEM;
}
-	bq->queue = kmalloc_array(bq->len, sizeof(struct qlge_bq_desc),
-				  GFP_KERNEL);
+	bq->queue = kcalloc(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
+			    GFP_KERNEL);
	if (!bq->queue)
		return -ENOMEM;
-	memset(bq->queue, 0, bq->len * sizeof(struct qlge_bq_desc));
buf_ptr = bq->base;
bq_desc = &bq->queue[0];
- for (i = 0; i < bq->len; i++, buf_ptr++, bq_desc++) {
+ for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
memset(bq_desc, 0, sizeof(*bq_desc));
bq_desc->index = i;
bq_desc->buf_ptr = buf_ptr;
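+		/* Each descriptor records its ring index and points at the
+		 * __le64 slot in the DMA-coherent ring where its buffer
+		 * address will be published to the hardware.
+		 */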
{
/* Free the small buffer queue. */
if (rx_ring->sbq.base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->sbq.size,
+ pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
rx_ring->sbq.base, rx_ring->sbq.base_dma);
rx_ring->sbq.base = NULL;
}
/* Free the large buffer queue. */
if (rx_ring->lbq.base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->lbq.size,
+ pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
rx_ring->lbq.base, rx_ring->lbq.base_dma);
rx_ring->lbq.base = NULL;
}
return -ENOMEM;
}
- if (rx_ring->sbq.len && qlge_init_bq(&rx_ring->sbq))
- goto err_mem;
- if (rx_ring->lbq.len && qlge_init_bq(&rx_ring->lbq))
- goto err_mem;
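+	/* Only inbound (RSS) completion rings use rx buffer queues;
+	 * outbound completion rings need no sbq/lbq setup.
+	 */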
+ if (rx_ring->cq_id < qdev->rss_ring_count &&
+ (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
+ ql_free_rx_resources(qdev, rx_ring);
+ return -ENOMEM;
+ }
return 0;
-
-err_mem:
- ql_free_rx_resources(qdev, rx_ring);
- return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
shadow_reg_dma += sizeof(u64);
rx_ring->lbq.base_indirect = shadow_reg;
rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
- shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len));
- shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len));
+ shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
rx_ring->sbq.base_indirect = shadow_reg;
rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
cqicb->flags = FLAGS_LC | /* Load queue base address */
FLAGS_LV | /* Load MSI-X vector */
FLAGS_LI; /* Load irq delay values */
- if (rx_ring->lbq.len) {
+ if (rx_ring->cq_id < qdev->rss_ring_count) {
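+		/* Inbound (RSS) rings are the only ones with buffer queues;
+		 * the lbq and sbq fields of the control block are loaded
+		 * together for them.
+		 */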
cqicb->flags |= FLAGS_LL; /* Load lbq values */
tmp = (u64)rx_ring->lbq.base_dma;
base_indirect_ptr = rx_ring->lbq.base_indirect;
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq.len));
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
cqicb->lbq_buf_size =
cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
- cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(rx_ring->lbq.len));
+ cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
rx_ring->lbq.prod_idx = 0;
rx_ring->lbq.curr_idx = 0;
rx_ring->lbq.clean_idx = 0;
- rx_ring->lbq.free_cnt = rx_ring->lbq.len;
- }
- if (rx_ring->sbq.len) {
+ rx_ring->lbq.free_cnt = QLGE_BQ_LEN;
+
cqicb->flags |= FLAGS_LS; /* Load sbq values */
tmp = (u64)rx_ring->sbq.base_dma;
base_indirect_ptr = rx_ring->sbq.base_indirect;
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq.len));
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq.base_indirect_dma);
cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
- cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(rx_ring->sbq.len));
+ cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
rx_ring->sbq.prod_idx = 0;
rx_ring->sbq.curr_idx = 0;
rx_ring->sbq.clean_idx = 0;
- rx_ring->sbq.free_cnt = rx_ring->sbq.len;
+ rx_ring->sbq.free_cnt = QLGE_BQ_LEN;
}
if (rx_ring->cq_id < qdev->rss_ring_count) {
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
rx_ring->lbq.type = QLGE_LB;
- rx_ring->lbq.len = NUM_LARGE_BUFFERS;
- rx_ring->lbq.size = rx_ring->lbq.len * sizeof(__le64);
rx_ring->sbq.type = QLGE_SB;
- rx_ring->sbq.len = NUM_SMALL_BUFFERS;
- rx_ring->sbq.size = rx_ring->sbq.len * sizeof(__le64);
} else {
	/*
	 * Outbound queue handles outbound completions only.
	 */
rx_ring->cq_len = qdev->tx_ring_size;
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq.len = 0;
- rx_ring->lbq.size = 0;
- rx_ring->sbq.len = 0;
- rx_ring->sbq.size = 0;
}
}
return 0;