/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include "q_struct.h"

#define MAX_QUEUE_SET			128
#define MAX_RCV_QUEUES_PER_QS		8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define MAX_SND_QUEUES_PER_QS		8
#define MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ		0
#define NICVF_INTR_ID_SQ		8
#define NICVF_INTR_ID_RBDR		16
#define NICVF_INTR_ID_MISC		18
#define NICVF_INTR_ID_QS_ERR		19

#define for_each_cq_irq(irq)	\
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq)	\
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq)	\
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)

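/* Illustrative usage sketch (not part of the original header): walking the
 * per-VF CQ interrupt range and converting each IRQ ID back into a queue
 * index. NICVF_INTR_CQ is assumed to be the interrupt type defined in the
 * companion nic.h header.
 *
 *	int irq;
 *
 *	for_each_cq_irq(irq)
 *		nicvf_enable_intr(nic, NICVF_INTR_CQ, irq - NICVF_INTR_ID_CQ);
 */
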
#define RBDR_SIZE0		0ULL /* 8K entries */
#define RBDR_SIZE1		1ULL /* 16K entries */
#define RBDR_SIZE2		2ULL /* 32K entries */
#define RBDR_SIZE3		3ULL /* 64K entries */
#define RBDR_SIZE4		4ULL /* 128K entries */
#define RBDR_SIZE5		5ULL /* 256K entries */
#define RBDR_SIZE6		6ULL /* 512K entries */

#define SND_QUEUE_SIZE0		0ULL /* 1K entries */
#define SND_QUEUE_SIZE1		1ULL /* 2K entries */
#define SND_QUEUE_SIZE2		2ULL /* 4K entries */
#define SND_QUEUE_SIZE3		3ULL /* 8K entries */
#define SND_QUEUE_SIZE4		4ULL /* 16K entries */
#define SND_QUEUE_SIZE5		5ULL /* 32K entries */
#define SND_QUEUE_SIZE6		6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0		0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1		1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2		2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3		3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4		4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5		5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6		6ULL /* 64K entries */

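/* Note: the size defines above are hardware encodings, not entry counts.
 * An SQ or CQ of size N holds (1K << N) entries and an RBDR of size N
 * holds (8K << N) entries, which is where the "+ 10" and "+ 13" shifts
 * in the length macros below come from.
 */
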
/* Default queue counts per QS, their lengths and threshold values */
#define RBDR_CNT		1
#define RCV_QUEUE_CNT		8
#define SND_QUEUE_CNT		8
#define CMP_QUEUE_CNT		8 /* Max of RCV and SND qcount */

#define SND_QSIZE		SND_QUEUE_SIZE2
#define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH	2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT	2
/* One CQE per transmitted packet since timestamping is not enabled;
 * otherwise this would be 2.
 */
#define MAX_CQE_PER_PKT_XMIT		1

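/* Worked example with the defaults above: SND_QSIZE is SND_QUEUE_SIZE2 (2),
 * so SND_QUEUE_LEN = 1ULL << (2 + 10) = 4096 descriptors; with
 * MIN_SQ_DESC_PER_PKT_XMIT = 2, a full SQ describes at most 2048 packets.
 */
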
/* Keep CQ and SQ sizes the same; if timestamping
 * is enabled this equation will change.
 */
#define CMP_QSIZE		CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH	(NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */

#define RBDR_SIZE		RBDR_SIZE0
#define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN		2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN	(SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
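
/* RCV_FRAG_LEN sizes each receive fragment to hold the 2048-byte DMA
 * buffer plus NET_SKB_PAD headroom and a trailing struct skb_shared_info,
 * i.e. the sizing convention expected by build_skb()-style receive paths.
 */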

#define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
				 MAX_CQE_PER_PKT_XMIT)
/* Calculate the number of CQEs to reserve for all SQEs,
 * expressed in 1/256th units of the CQ size.
 * '+ 1' accounts for pipelining.
 */
#define RQ_CQ_DROP		((256 / (CMP_QUEUE_LEN / \
				 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
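
/* With the default sizes: CMP_QUEUE_LEN = 4096 and MAX_CQES_FOR_TX =
 * (4096 / 2) * 1 = 2048, so RQ_CQ_DROP = (256 / (4096 / 2048)) + 1 = 129.
 */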

/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE	16
#define CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN		7
#define NICVF_RCV_BUF_ALIGN_BYTES	(1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES	512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES	128 /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)

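/* For example, NICVF_ALIGNED_ADDR(0x1003, NICVF_CQ_BASE_ALIGN_BYTES)
 * rounds the address up to the next 512-byte boundary, giving 0x1200.
 */
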
/* Queue enable/disable */
#define NICVF_SQ_EN		BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET		BIT_ULL(41)
#define NICVF_SQ_RESET		BIT_ULL(17)
#define NICVF_RBDR_RESET	BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct cmp_queue_stats {
	struct tx_stats {
		u64 good;
		u64 desc_fault;
		u64 hdr_cons_err;
		u64 subdesc_err;
		u64 imm_size_oflow;
		u64 data_seq_err;
		u64 mem_seq_err;
		u64 lock_viol;
		u64 data_fault;
		u64 tstmp_conflict;
		u64 tstmp_timeout;
		u64 mem_fault;
		u64 csum_overlap;
		u64 csum_overflow;
	} tx;
} ____cacheline_aligned_in_smp;

enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
	u64 bytes;
	u64 pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
	dma_addr_t dma;
	u64 size;
	u16 q_len;
	dma_addr_t phys_base;
	void *base;
	void *unalign_base;
};

struct rbdr {
	bool enable;
	u32 dma_size;
	u32 frag_len;
	u32 thresh;		/* Threshold level for interrupt */
	void *desc;
	u32 head;
	u32 tail;
	struct q_desc_mem dmem;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
	bool enable;
	struct rbdr *rbdr_start;
	struct rbdr *rbdr_cont;
	bool en_tcp_reassembly;
	u8 cq_qs;		/* CQ's QS to which this RQ is assigned */
	u8 cq_idx;		/* CQ index (0 to 7) in the QS */
	u8 cont_rbdr_qs;	/* Continue buffer ptrs - QS num */
	u8 cont_qs_rbdr_idx;	/* RBDR idx in the cont QS */
	u8 start_rbdr_qs;	/* First buffer ptrs - QS num */
	u8 start_qs_rbdr_idx;	/* RBDR idx in the above QS */
	u8 caching;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
	bool enable;
	u16 thresh;
	spinlock_t lock;	/* lock to serialize processing CQEs */
	void *desc;
	struct q_desc_mem dmem;
	struct cmp_queue_stats stats;
	int irq;
} ____cacheline_aligned_in_smp;

struct snd_queue {
	bool enable;
	u8 cq_qs;		/* CQ's QS to which this SQ is pointing */
	u8 cq_idx;		/* CQ index (0 to 7) in the above QS */
	u16 thresh;
	atomic_t free_cnt;
	u32 head;
	u32 tail;
	u64 *skbuff;
	void *desc;

#define TSO_HEADER_SIZE	128
	/* For TSO segment's header */
	char *tso_hdrs;
	dma_addr_t tso_hdrs_phys;

	cpumask_t affinity_mask;
	struct q_desc_mem dmem;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
	bool enable;
	bool be_en;
	u8 vnic_id;
	u8 rq_cnt;
	u8 cq_cnt;
	u64 cq_len;
	u8 sq_cnt;
	u64 sq_len;
	u8 rbdr_cnt;
	u64 rbdr_len;
	struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
	struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
	struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
	struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

#define GET_RBDR_DESC(RING, idx)\
		(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
		(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
		(&(((union cq_desc_t *)((RING)->desc))[idx]))

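/* Ring memory is allocated untyped, so descriptor access goes through the
 * casting macros above, e.g.
 *
 *	struct sq_hdr_subdesc *hdr = GET_SQ_DESC(sq, sq->tail);
 *
 * where struct sq_hdr_subdesc is defined in q_struct.h.
 */
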
/* CQ status bits */
#define CQ_WR_FULL	BIT(26)
#define CQ_WR_DISABLE	BIT(25)
#define CQ_WR_FAULT	BIT(24)
#define CQ_CQE_COUNT	(0xFFFF << 0)

#define CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

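/* Illustrative usage sketch with the register helpers declared below.
 * NIC_QSET_CQ_0_7_STATUS is an assumed register offset from the companion
 * register-definition header, and handle_cq_error() is a hypothetical
 * helper shown only for shape.
 *
 *	u64 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, qidx);
 *	u16 cqe_count = status & CQ_CQE_COUNT;
 *
 *	if (status & CQ_ERR_MASK)
 *		handle_cq_error(nic, qidx);
 */
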
void nicvf_config_vlan_stripping(struct nicvf *nic,
				 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
			 u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */