/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include "q_struct.h"

#define MAX_QUEUE_SET			128
#define MAX_RCV_QUEUES_PER_QS		8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define MAX_SND_QUEUES_PER_QS		8
#define MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ	0
#define NICVF_INTR_ID_SQ	8
#define NICVF_INTR_ID_RBDR	16
#define NICVF_INTR_ID_MISC	18
#define NICVF_INTR_ID_QS_ERR	19

#define for_each_cq_irq(irq)	\
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq)	\
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq)	\
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)

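/*
 * Interrupt vector layout implied by the IDs above: CQ interrupts occupy
 * vectors 0-7, SQ interrupts 8-15, RBDR interrupts 16-17, followed by the
 * misc vector (18) and the QS error vector (19).  For example,
 * for_each_cq_irq(irq) simply iterates irq over the eight per-CQ vectors
 * 0..7.
 */
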
#define RBDR_SIZE0		0ULL /* 8K entries */
#define RBDR_SIZE1		1ULL /* 16K entries */
#define RBDR_SIZE2		2ULL /* 32K entries */
#define RBDR_SIZE3		3ULL /* 64K entries */
#define RBDR_SIZE4		4ULL /* 128K entries */
#define RBDR_SIZE5		5ULL /* 256K entries */
#define RBDR_SIZE6		6ULL /* 512K entries */

#define SND_QUEUE_SIZE0		0ULL /* 1K entries */
#define SND_QUEUE_SIZE1		1ULL /* 2K entries */
#define SND_QUEUE_SIZE2		2ULL /* 4K entries */
#define SND_QUEUE_SIZE3		3ULL /* 8K entries */
#define SND_QUEUE_SIZE4		4ULL /* 16K entries */
#define SND_QUEUE_SIZE5		5ULL /* 32K entries */
#define SND_QUEUE_SIZE6		6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0		0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1		1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2		2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3		3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4		4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5		5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6		6ULL /* 64K entries */

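/*
 * The values above are hardware size encodings, not entry counts: an
 * encoding N corresponds to (1ULL << (N + 10)) entries for send and
 * completion queues and (1ULL << (N + 13)) entries for RBDRs, which is
 * how SND_QUEUE_LEN, CMP_QUEUE_LEN and RCV_BUF_COUNT below are derived.
 * E.g. SND_QUEUE_SIZE2 encodes a 4096-entry (4K) send queue.
 */
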
/* Default queue count per QS, queue lengths and threshold values */
#define DEFAULT_RBDR_CNT		1

#define SND_QSIZE			SND_QUEUE_SIZE2
#define SND_QUEUE_LEN			(1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN		(1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH		2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT	2
/* Timestamping is not enabled; otherwise this would be 2 */
#define MAX_CQE_PER_PKT_XMIT		1

/* Keep CQ and SQ sizes the same; if timestamping
 * is enabled this equation will change.
 */
#define CMP_QSIZE		CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH	(NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */

#define RBDR_SIZE		RBDR_SIZE0
#define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN		2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN	(SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
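
/*
 * RCV_FRAG_LEN is the size of each receive buffer fragment allocation:
 * the DMA_BUFFER_LEN packet area plus NET_SKB_PAD headroom, aligned for
 * skb use, plus room for struct skb_shared_info - i.e. the sizing that a
 * build_skb()-style conversion of the buffer into an sk_buff expects.
 */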

#define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
				 MAX_CQE_PER_PKT_XMIT)
/* Calculate number of CQEs to reserve for all SQEs.
 * It is expressed in units of 1/256th of the CQ size.
 * '+ 1' to account for pipelining.
 */
#define RQ_CQ_DROP		((256 / (CMP_QUEUE_LEN / \
				 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
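
/*
 * Worked example with the default sizes above: SND_QUEUE_LEN and
 * CMP_QUEUE_LEN are both 4096, so MAX_CQES_FOR_TX = (4096 / 2) * 1 = 2048
 * and RQ_CQ_DROP = (256 / (4096 / 2048)) + 1 = 129.  Per the comment
 * above, the RX drop level is therefore set just past the half-full point
 * of the CQ (129/256), leaving the remaining CQEs for transmit
 * completions.
 */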

/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE	16
#define CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN		7
#define NICVF_RCV_BUF_ALIGN_BYTES	(1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES	512  /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES	128  /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)
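
/*
 * NICVF_ALIGNED_ADDR() simply rounds an address up to the requested
 * power-of-two boundary via the kernel's ALIGN() macro.  For instance,
 * NICVF_ALIGNED_ADDR(addr, NICVF_RCV_BUF_ALIGN_BYTES) rounds 'addr' up to
 * the next 128-byte boundary (NICVF_RCV_BUF_ALIGN is 7, i.e. 1 << 7).
 */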

/* Queue enable/disable */
#define NICVF_SQ_EN		BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET		BIT_ULL(41)
#define NICVF_SQ_RESET		BIT_ULL(17)
#define NICVF_RBDR_RESET	BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
	u64	bytes;
	u64	pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
	dma_addr_t	dma;
	u64		size;
	u16		q_len;
	dma_addr_t	phys_base;
	void		*base;
	void		*unalign_base;
};

struct rbdr {
	bool		enable;
	u32		dma_size;
	u32		frag_len;
	u32		thresh;		/* Threshold level for interrupt */
	void		*desc;
	u32		head;
	u32		tail;
	struct q_desc_mem	dmem;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
	bool		enable;
	struct rbdr	*rbdr_start;
	struct rbdr	*rbdr_cont;
	bool		en_tcp_reassembly;
	u8		cq_qs;			/* CQ's QS to which this RQ is assigned */
	u8		cq_idx;			/* CQ index (0 to 7) in the QS */
	u8		cont_rbdr_qs;		/* Continue buffer ptrs - QS num */
	u8		cont_qs_rbdr_idx;	/* RBDR idx in the cont QS */
	u8		start_rbdr_qs;		/* First buffer ptrs - QS num */
	u8		start_qs_rbdr_idx;	/* RBDR idx in the above QS */
	u8		caching;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
	bool		enable;
	u16		thresh;
	spinlock_t	lock;	/* lock to serialize processing CQEs */
	void		*desc;
	struct q_desc_mem	dmem;
	int		irq;
} ____cacheline_aligned_in_smp;

struct snd_queue {
	bool		enable;
	u8		cq_qs;	/* CQ's QS to which this SQ is pointing */
	u8		cq_idx;	/* CQ index (0 to 7) in the above QS */
	u16		thresh;
	atomic_t	free_cnt;
	u32		head;
	u32		tail;
	u64		*skbuff;
	void		*desc;

#define	TSO_HEADER_SIZE	128
	/* For TSO segment's header */
	char		*tso_hdrs;
	dma_addr_t	tso_hdrs_phys;

	cpumask_t	affinity_mask;
	struct q_desc_mem	dmem;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
	bool		enable;
	bool		be_en;
	u8		vnic_id;
	u8		rq_cnt;
	u8		cq_cnt;
	u64		cq_len;
	u8		sq_cnt;
	u64		sq_len;
	u8		rbdr_cnt;
	u64		rbdr_len;
	struct rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
	struct cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
	struct snd_queue	sq[MAX_SND_QUEUES_PER_QS];
	struct rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

#define GET_RBDR_DESC(RING, idx)\
		(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
		(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
		(&(((union cq_desc_t *)((RING)->desc))[idx]))
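
/*
 * Each GET_*_DESC() macro indexes the ring's contiguous descriptor memory
 * and returns a pointer to the idx'th entry.  Illustrative use (cqe_head
 * is a hypothetical index variable, not defined in this header):
 *
 *	struct sq_hdr_subdesc *hdr = GET_SQ_DESC(sq, sq->tail);
 *	union cq_desc_t *cqe = GET_CQ_DESC(cq, cqe_head);
 */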

/* CQ status bits */
#define	CQ_WR_FULL	BIT(26)
#define	CQ_WR_DISABLE	BIT(25)
#define	CQ_WR_FAULT	BIT(24)
#define	CQ_CQE_COUNT	(0xFFFF << 0)

#define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

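/*
 * Example (illustrative, assuming 'status' holds a value read back from
 * the CQ status register): the pending CQE count and error state can be
 * extracted with the masks above:
 *
 *	cqe_count = status & CQ_CQE_COUNT;
 *	if (status & CQ_ERR_MASK)
 *		... handle CQ write full/disable/fault ...
 */
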
void nicvf_config_vlan_stripping(struct nicvf *nic,
				 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
			 u64 offset, u64 qidx);
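
/*
 * The nicvf_queue_reg_* accessors take the queue index within the QS in
 * addition to the register offset.  Illustrative use (the offset name is
 * assumed to be defined in nic_reg.h, not in this header):
 *
 *	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, qidx, cqe_count);
 */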

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */