]>
Commit | Line | Data |
---|---|---|
72c00912 RV |
1 | /********************************************************************** |
2 | * Author: Cavium, Inc. | |
3 | * | |
4 | * Contact: support@cavium.com | |
5 | * Please include "LiquidIO" in the subject. | |
6 | * | |
7 | * Copyright (c) 2003-2015 Cavium, Inc. | |
8 | * | |
9 | * This file is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License, Version 2, as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This file is distributed in the hope that it will be useful, but | |
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty | |
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or | |
16 | * NONINFRINGEMENT. See the GNU General Public License for more | |
17 | * details. | |
18 | * | |
19 | * This file may also be available under a different license from Cavium. | |
20 | * Contact Cavium, Inc. for more information | |
21 | **********************************************************************/ | |
22 | ||
23 | #include <linux/pci.h> | |
24 | #include <linux/netdevice.h> | |
25 | #include <linux/vmalloc.h> | |
26 | #include "liquidio_common.h" | |
27 | #include "octeon_droq.h" | |
28 | #include "octeon_iq.h" | |
29 | #include "response_manager.h" | |
30 | #include "octeon_device.h" | |
31 | #include "cn23xx_pf_device.h" | |
32 | #include "octeon_main.h" | |
33 | ||
/* NOTE(review): presumably the soft-reset status values polled via
 * SLI_SCRATCH1 — confirm against cn23xx_soft_reset.
 */
#define RESET_NOTDONE 0
#define RESET_DONE 1

/* Change the value of SLI Packet Input Jabber Register to allow
 * VXLAN TSO packets which can be 64424 bytes, exceeding the
 * MAX_GSO_SIZE we supplied to the kernel
 */
#define CN23XX_INPUT_JABBER 64600

/* Ring distribution lookup table, indexed by the number of VFs
 * (0..8).  Entry N appears to be the number of rings each VF gets when
 * N VFs are enabled (e.g. 1 VF -> 8 rings, 2 VFs -> 4 rings each) —
 * verify against the SR-IOV enable path that consumes this table.
 */
#define LIOLUT_RING_DISTRIBUTION 9
const int liolut_num_vfs_to_rings_per_vf[LIOLUT_RING_DISTRIBUTION] = {
	0, 8, 4, 2, 2, 2, 1, 1, 1
};
47 | ||
/* Dump, at dev_dbg level, every register touched during PF
 * initialization, grouped by the setup routine that programs it.
 * Purely diagnostic: only reads the hardware, modifies nothing.
 */
void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
{
	int i = 0;
	u32 regval = 0;
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

	/*In cn23xx_soft_reset*/
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
		"CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
		lio_pci_readq(oct, CN23XX_RST_SOFT_RST));

	/*In cn23xx_set_dpi_regs*/
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
		lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));

	for (i = 0; i < 6; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_DPI_DMA_ENG_ENB", i,
			CN23XX_DPI_DMA_ENG_ENB(i),
			lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_DPI_DMA_ENG_BUF", i,
			CN23XX_DPI_DMA_ENG_BUF(i),
			lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
	}

	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
		CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));

	/*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_CONFIG_PCIE_DEVCTL",
		CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));

	dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
		"CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
		CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
		lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));

	/*In cn23xx_specific_regs_setup */
	dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
		"CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
		CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
		CVM_CAST64(octeon_read_csr64(
			oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));

	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
		(u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));

	/*In cn23xx_setup_global_mac_regs*/
	for (i = 0; i < CN23XX_MAX_MACS; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_PKT_MAC_RINFO64", i,
			CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
			CVM_CAST64(octeon_read_csr64
				   (oct, CN23XX_SLI_PKT_MAC_RINFO64
				    (i, oct->pf_num))));
	}

	/*In cn23xx_setup_global_input_regs*/
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_PKT_CONTROL64", i,
			CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
			CVM_CAST64(octeon_read_csr64
				   (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
	}

	/*In cn23xx_setup_global_output_regs*/
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));

	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_PKT_CONTROL", i,
			CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
			CVM_CAST64(octeon_read_csr(
				oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
			CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
	}

	/*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"cn23xx->intr_enb_reg64",
		CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
		CVM_CAST64(readq(cn23xx->intr_enb_reg64)));

	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"cn23xx->intr_sum_reg64",
		CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
		CVM_CAST64(readq(cn23xx->intr_sum_reg64)));

	/*In cn23xx_setup_iq_regs*/
	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_BASE_ADDR64", i,
			CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_SIZE", i,
			CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
			CVM_CAST64(octeon_read_csr
				   (oct, CN23XX_SLI_IQ_SIZE(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_DOORBELL", i,
			CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_IQ_DOORBELL(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_IQ_INSTR_COUNT64", i,
			CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
	}

	/*In cn23xx_setup_oq_regs*/
	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_BASE_ADDR64", i,
			CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_SIZE", i,
			CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
			CVM_CAST64(octeon_read_csr
				   (oct, CN23XX_SLI_OQ_SIZE(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
			CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
			CVM_CAST64(octeon_read_csr(
				oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_PKTS_SENT", i,
			CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
		dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
			"CN23XX_SLI_OQ_PKTS_CREDIT", i,
			CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
			CVM_CAST64(octeon_read_csr64(
				oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
	}

	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_PKT_TIME_INT",
		CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
	dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
		"CN23XX_SLI_PKT_CNT_INT",
		CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
}
216 | ||
3451b97c RV |
217 | static void cn23xx_enable_error_reporting(struct octeon_device *oct) |
218 | { | |
219 | u32 regval; | |
220 | u32 uncorrectable_err_mask, corrtable_err_status; | |
221 | ||
222 | pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, ®val); | |
223 | if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) { | |
224 | uncorrectable_err_mask = 0; | |
225 | corrtable_err_status = 0; | |
226 | pci_read_config_dword(oct->pci_dev, | |
227 | CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK, | |
228 | &uncorrectable_err_mask); | |
229 | pci_read_config_dword(oct->pci_dev, | |
230 | CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS, | |
231 | &corrtable_err_status); | |
232 | dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n" | |
233 | "\tdev_ctl_status_reg = 0x%08x\n" | |
234 | "\tuncorrectable_error_mask_reg = 0x%08x\n" | |
235 | "\tcorrectable_error_status_reg = 0x%08x\n", | |
236 | regval, uncorrectable_err_mask, | |
237 | corrtable_err_status); | |
238 | } | |
239 | ||
240 | regval |= 0xf; /* Enable Link error reporting */ | |
241 | ||
242 | dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n", | |
243 | oct->octeon_id); | |
244 | pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval); | |
245 | } | |
246 | ||
72c00912 RV |
247 | static u32 cn23xx_coprocessor_clock(struct octeon_device *oct) |
248 | { | |
249 | /* Bits 29:24 of RST_BOOT[PNR_MUL] holds the ref.clock MULTIPLIER | |
250 | * for SLI. | |
251 | */ | |
252 | ||
253 | /* TBD: get the info in Hand-shake */ | |
254 | return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50); | |
255 | } | |
256 | ||
3451b97c RV |
257 | u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us) |
258 | { | |
259 | /* This gives the SLI clock per microsec */ | |
260 | u32 oqticks_per_us = cn23xx_coprocessor_clock(oct); | |
261 | ||
262 | oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us; | |
263 | ||
264 | /* This gives the clock cycles per millisecond */ | |
265 | oqticks_per_us *= 1000; | |
266 | ||
267 | /* This gives the oq ticks (1024 core clock cycles) per millisecond */ | |
268 | oqticks_per_us /= 1024; | |
269 | ||
270 | /* time_intr is in microseconds. The next 2 steps gives the oq ticks | |
271 | * corressponding to time_intr. | |
272 | */ | |
273 | oqticks_per_us *= time_intr_in_us; | |
274 | oqticks_per_us /= 1000; | |
275 | ||
276 | return oqticks_per_us; | |
277 | } | |
278 | ||
279 | static void cn23xx_setup_global_mac_regs(struct octeon_device *oct) | |
280 | { | |
281 | u64 reg_val; | |
282 | u16 mac_no = oct->pcie_port; | |
283 | u16 pf_num = oct->pf_num; | |
284 | ||
285 | /* programming SRN and TRS for each MAC(0..3) */ | |
286 | ||
287 | dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n", | |
288 | __func__, mac_no); | |
289 | /* By default, mapping all 64 IOQs to a single MACs */ | |
290 | ||
291 | reg_val = | |
292 | octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)); | |
293 | ||
294 | if (oct->rev_id == OCTEON_CN23XX_REV_1_1) { | |
295 | /* setting SRN <6:0> */ | |
296 | reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1; | |
297 | } else { | |
298 | /* setting SRN <6:0> */ | |
299 | reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF; | |
300 | } | |
301 | ||
302 | /* setting TRS <23:16> */ | |
303 | reg_val = reg_val | | |
304 | (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS); | |
305 | /* write these settings to MAC register */ | |
306 | octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num), | |
307 | reg_val); | |
308 | ||
309 | dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n", | |
310 | mac_no, pf_num, (u64)octeon_read_csr64 | |
311 | (oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num))); | |
312 | } | |
313 | ||
/* Program the global input-queue registers for this PF.
 *
 * Stamps MAC/PF ownership into IQ_PKT_CONTROL for rings 0..ern-1, then
 * for the PF's own rings (srn..ern-1) ORs in the default control mask
 * and programs the PI_INT watermark into the instruction-count
 * register.  Always returns 0.
 */
static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
{
	u32 q_no, ern, srn;
	u64 pf_num;
	u64 intr_threshold, reg_val;
	struct octeon_instr_queue *iq;
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

	pf_num = oct->pf_num;

	/* the PF owns rings srn .. ern-1 */
	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	/** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
	 * for all queues.Only PF can set these bits.
	 * bits 29:30 indicate the MAC num.
	 * bits 32:47 indicate the PVF num.
	 */
	/* NOTE(review): this loop deliberately starts at 0 rather than srn,
	 * covering every ring up to the PF range's end, per the "for all
	 * queues" comment above — confirm intent for the srn != 0 case.
	 */
	for (q_no = 0; q_no < ern; q_no++) {
		reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
		reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;

		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   reg_val);
	}

	/* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
	 * pf queues
	 */
	for (q_no = srn; q_no < ern; q_no++) {
		void __iomem *inst_cnt_reg;

		/* use the cached register address if the IQ is already set
		 * up, otherwise compute it from BAR0
		 */
		iq = oct->instr_queue[q_no];
		if (iq)
			inst_cnt_reg = iq->inst_cnt_reg;
		else
			inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
				       CN23XX_SLI_IQ_INSTR_COUNT64(q_no);

		reg_val =
		    octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

		reg_val |= CN23XX_PKT_INPUT_CTL_MASK;

		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   reg_val);

		/* Set WMARK level for triggering PI_INT */
		/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
		intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
				 CN23XX_PKT_IN_DONE_WMARK_MASK;

		/* read-modify-write: replace only the WMARK field of the
		 * instruction-count register
		 */
		writeq((readq(inst_cnt_reg) &
			~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
			  CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
		       (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
		       inst_cnt_reg);
	}
	return 0;
}
374 | ||
375 | static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) | |
376 | { | |
377 | u32 reg_val; | |
378 | u32 q_no, ern, srn; | |
379 | u64 time_threshold; | |
380 | ||
381 | struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; | |
382 | ||
383 | srn = oct->sriov_info.pf_srn; | |
384 | ern = srn + oct->sriov_info.num_pf_rings; | |
385 | ||
386 | if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) { | |
387 | octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32); | |
388 | } else { | |
389 | /** Set Output queue watermark to 0 to disable backpressure */ | |
390 | octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0); | |
391 | } | |
392 | ||
393 | for (q_no = srn; q_no < ern; q_no++) { | |
394 | reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); | |
395 | ||
396 | /* set IPTR & DPTR */ | |
397 | reg_val |= | |
398 | (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR); | |
399 | ||
400 | /* reset BMODE */ | |
401 | reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE); | |
402 | ||
403 | /* No Relaxed Ordering, No Snoop, 64-bit Byte swap | |
404 | * for Output Queue ScatterList | |
405 | * reset ROR_P, NSR_P | |
406 | */ | |
407 | reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P); | |
408 | reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P); | |
409 | ||
410 | #ifdef __LITTLE_ENDIAN_BITFIELD | |
411 | reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P); | |
412 | #else | |
413 | reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P); | |
414 | #endif | |
415 | /* No Relaxed Ordering, No Snoop, 64-bit Byte swap | |
416 | * for Output Queue Data | |
417 | * reset ROR, NSR | |
418 | */ | |
419 | reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR); | |
420 | reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR); | |
421 | /* set the ES bit */ | |
422 | reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES); | |
423 | ||
424 | /* write all the selected settings */ | |
425 | octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val); | |
426 | ||
427 | /* Enabling these interrupt in oct->fn_list.enable_interrupt() | |
428 | * routine which called after IOQ init. | |
429 | * Set up interrupt packet and time thresholds | |
430 | * for all the OQs | |
431 | */ | |
432 | time_threshold = cn23xx_pf_get_oq_ticks( | |
433 | oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf)); | |
434 | ||
435 | octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), | |
436 | (CFG_GET_OQ_INTR_PKT(cn23xx->conf) | | |
437 | (time_threshold << 32))); | |
438 | } | |
439 | ||
440 | /** Setting the water mark level for pko back pressure **/ | |
441 | writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK); | |
442 | ||
443 | /** Disabling setting OQs in reset when ring has no dorebells | |
444 | * enabling this will cause of head of line blocking | |
445 | */ | |
446 | /* Do it only for pass1.1. and pass1.2 */ | |
447 | if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) || | |
448 | (oct->rev_id == OCTEON_CN23XX_REV_1_1)) | |
449 | writeq(readq((u8 *)oct->mmio[0].hw_addr + | |
450 | CN23XX_SLI_GBL_CONTROL) | 0x2, | |
451 | (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL); | |
452 | ||
453 | /** Enable channel-level backpressure */ | |
454 | if (oct->pf_num) | |
455 | writeq(0xffffffffffffffffULL, | |
456 | (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S); | |
457 | else | |
458 | writeq(0xffffffffffffffffULL, | |
459 | (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S); | |
460 | } | |
461 | ||
/* One-shot PF register setup, in dependency order: PCIe error
 * reporting, MAC ring info, global input regs, global output regs,
 * window-control timeout, and input jabber limit.
 *
 * Returns 0 on success, -1 if global input-reg setup fails.
 */
static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
	cn23xx_enable_error_reporting(oct);

	/* program the MAC(0..3)_RINFO before setting up input/output regs */
	cn23xx_setup_global_mac_regs(oct);

	if (cn23xx_pf_setup_global_input_regs(oct))
		return -1;

	cn23xx_pf_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid host hang
	 * when reads invalid register
	 */
	octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
			   CN23XX_SLI_WINDOW_CTL_DEFAULT);

	/* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
	octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
	return 0;
}
484 | ||
72c00912 RV |
485 | static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no) |
486 | { | |
487 | struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; | |
488 | u64 pkt_in_done; | |
489 | ||
490 | iq_no += oct->sriov_info.pf_srn; | |
491 | ||
492 | /* Write the start of the input queue's ring and its size */ | |
493 | octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no), | |
494 | iq->base_addr_dma); | |
495 | octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count); | |
496 | ||
497 | /* Remember the doorbell & instruction count register addr | |
498 | * for this queue | |
499 | */ | |
500 | iq->doorbell_reg = | |
501 | (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no); | |
502 | iq->inst_cnt_reg = | |
503 | (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no); | |
504 | dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n", | |
505 | iq_no, iq->doorbell_reg, iq->inst_cnt_reg); | |
506 | ||
507 | /* Store the current instruction counter (used in flush_iq | |
508 | * calculation) | |
509 | */ | |
510 | pkt_in_done = readq(iq->inst_cnt_reg); | |
511 | ||
512 | /* Clear the count by writing back what we read, but don't | |
513 | * enable interrupts | |
514 | */ | |
515 | writeq(pkt_in_done, iq->inst_cnt_reg); | |
516 | ||
517 | iq->reset_instr_cnt = 0; | |
518 | } | |
519 | ||
520 | static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no) | |
521 | { | |
522 | u32 reg_val; | |
523 | struct octeon_droq *droq = oct->droq[oq_no]; | |
524 | ||
525 | oq_no += oct->sriov_info.pf_srn; | |
526 | ||
527 | octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no), | |
528 | droq->desc_ring_dma); | |
529 | octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count); | |
530 | ||
531 | octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no), | |
532 | (droq->buffer_size | (OCT_RH_SIZE << 16))); | |
533 | ||
534 | /* Get the mapped address of the pkt_sent and pkts_credit regs */ | |
535 | droq->pkts_sent_reg = | |
536 | (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no); | |
537 | droq->pkts_credit_reg = | |
538 | (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no); | |
539 | ||
540 | /* Enable this output queue to generate Packet Timer Interrupt | |
541 | */ | |
542 | reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no)); | |
543 | reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB; | |
544 | octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no), | |
545 | reg_val); | |
546 | ||
547 | /* Enable this output queue to generate Packet Count Interrupt | |
548 | */ | |
549 | reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no)); | |
550 | reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB; | |
551 | octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no), | |
552 | reg_val); | |
553 | } | |
554 | ||
555 | static void cn23xx_get_pcie_qlmport(struct octeon_device *oct) | |
556 | { | |
557 | oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff; | |
558 | ||
559 | dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n", | |
560 | oct->pcie_port); | |
561 | } | |
562 | ||
563 | static void cn23xx_get_pf_num(struct octeon_device *oct) | |
564 | { | |
565 | u32 fdl_bit = 0; | |
566 | ||
567 | /** Read Function Dependency Link reg to get the function number */ | |
568 | pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, &fdl_bit); | |
569 | oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & | |
570 | CN23XX_PCIE_SRIOV_FDL_MASK); | |
571 | } | |
572 | ||
/* Populate oct->reg_list with the BAR0-mapped addresses of the window
 * read/write registers, determine the PCIe port, build the interrupt
 * mask for this revision, and cache the mapped interrupt sum/enable
 * register addresses for this PF.
 */
static void cn23xx_setup_reg_address(struct octeon_device *oct)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

	/* windowed write address registers */
	oct->reg_list.pci_win_wr_addr_hi =
	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
	oct->reg_list.pci_win_wr_addr_lo =
	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
	oct->reg_list.pci_win_wr_addr =
	    (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);

	/* windowed read address registers */
	oct->reg_list.pci_win_rd_addr_hi =
	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
	oct->reg_list.pci_win_rd_addr_lo =
	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
	oct->reg_list.pci_win_rd_addr =
	    (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);

	/* windowed write data registers */
	oct->reg_list.pci_win_wr_data_hi =
	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
	oct->reg_list.pci_win_wr_data_lo =
	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
	oct->reg_list.pci_win_wr_data =
	    (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);

	/* windowed read data registers */
	oct->reg_list.pci_win_rd_data_hi =
	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
	oct->reg_list.pci_win_rd_data_lo =
	    (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
	oct->reg_list.pci_win_rd_data =
	    (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);

	/* must run before computing the MAC/PF interrupt reg addresses
	 * below, which depend on oct->pcie_port
	 */
	cn23xx_get_pcie_qlmport(oct);

	cn23xx->intr_mask64 = CN23XX_INTR_MASK;
	cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
	/* VF mailbox interrupts exist only from pass 1.1 onward */
	if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
		cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;

	cn23xx->intr_sum_reg64 =
	    bar0_pciaddr +
	    CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
	cn23xx->intr_enb_reg64 =
	    bar0_pciaddr +
	    CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
}
620 | ||
621 | static int cn23xx_sriov_config(struct octeon_device *oct) | |
622 | { | |
623 | u32 total_rings; | |
624 | struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; | |
625 | /* num_vfs is already filled for us */ | |
626 | u32 pf_srn, num_pf_rings; | |
627 | ||
628 | cn23xx->conf = | |
629 | (struct octeon_config *)oct_get_config_info(oct, LIO_23XX); | |
630 | switch (oct->rev_id) { | |
631 | case OCTEON_CN23XX_REV_1_0: | |
632 | total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0; | |
633 | break; | |
634 | case OCTEON_CN23XX_REV_1_1: | |
635 | total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1; | |
636 | break; | |
637 | default: | |
638 | total_rings = CN23XX_MAX_RINGS_PER_PF; | |
639 | break; | |
640 | } | |
641 | if (!oct->sriov_info.num_pf_rings) { | |
642 | if (total_rings > num_present_cpus()) | |
643 | num_pf_rings = num_present_cpus(); | |
644 | else | |
645 | num_pf_rings = total_rings; | |
646 | } else { | |
647 | num_pf_rings = oct->sriov_info.num_pf_rings; | |
648 | ||
649 | if (num_pf_rings > total_rings) { | |
650 | dev_warn(&oct->pci_dev->dev, | |
651 | "num_queues_per_pf requested %u is more than available rings. Reducing to %u\n", | |
652 | num_pf_rings, total_rings); | |
653 | num_pf_rings = total_rings; | |
654 | } | |
655 | } | |
656 | ||
657 | total_rings = num_pf_rings; | |
658 | /* the first ring of the pf */ | |
659 | pf_srn = total_rings - num_pf_rings; | |
660 | ||
661 | oct->sriov_info.trs = total_rings; | |
662 | oct->sriov_info.pf_srn = pf_srn; | |
663 | oct->sriov_info.num_pf_rings = num_pf_rings; | |
664 | dev_dbg(&oct->pci_dev->dev, "trs:%d pf_srn:%d num_pf_rings:%d\n", | |
665 | oct->sriov_info.trs, oct->sriov_info.pf_srn, | |
666 | oct->sriov_info.num_pf_rings); | |
667 | return 0; | |
668 | } | |
669 | ||
/* Top-level CN23xx PF device setup: map BAR0/BAR1, read the PF number,
 * work out the ring configuration, and install the chip-specific
 * function pointers and register addresses.
 *
 * Returns 0 on success, 1 on failure (with any mapped BARs unmapped).
 */
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
{
	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	cn23xx_get_pf_num(oct);

	if (cn23xx_sriov_config(oct)) {
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);

	oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
	oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;

	cn23xx_setup_reg_address(oct);

	/* cn23xx_coprocessor_clock() reports MHz; store the rate in Hz */
	oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);

	return 0;
}
702 | ||
703 | int validate_cn23xx_pf_config_info(struct octeon_device *oct, | |
704 | struct octeon_config *conf23xx) | |
705 | { | |
706 | if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) { | |
707 | dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n", | |
708 | __func__, CFG_GET_IQ_MAX_Q(conf23xx), | |
709 | CN23XX_MAX_INPUT_QUEUES); | |
710 | return 1; | |
711 | } | |
712 | ||
713 | if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) { | |
714 | dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n", | |
715 | __func__, CFG_GET_OQ_MAX_Q(conf23xx), | |
716 | CN23XX_MAX_OUTPUT_QUEUES); | |
717 | return 1; | |
718 | } | |
719 | ||
720 | if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR && | |
721 | CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) { | |
722 | dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n", | |
723 | __func__); | |
724 | return 1; | |
725 | } | |
726 | ||
727 | if (!(CFG_GET_OQ_INFO_PTR(conf23xx)) || | |
728 | !(CFG_GET_OQ_REFILL_THRESHOLD(conf23xx))) { | |
729 | dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n", | |
730 | __func__); | |
731 | return 1; | |
732 | } | |
733 | ||
734 | if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) { | |
735 | dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n", | |
736 | __func__); | |
737 | return 1; | |
738 | } | |
739 | ||
740 | return 0; | |
741 | } | |
742 | ||
/* Dump (at dev_dbg level) the input-queue related registers: queue 0's
 * doorbell/base/size, SLI control status, every queue's input control,
 * and the PCIe device-control / port-config registers.  Diagnostic
 * only; reads the hardware but modifies nothing.
 */
void cn23xx_dump_iq_regs(struct octeon_device *oct)
{
	u32 regval, q_no;

	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
		CN23XX_SLI_IQ_DOORBELL(0),
		CVM_CAST64(octeon_read_csr64
			   (oct, CN23XX_SLI_IQ_DOORBELL(0))));

	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
		CN23XX_SLI_IQ_BASE_ADDR64(0),
		CVM_CAST64(octeon_read_csr64
			   (oct, CN23XX_SLI_IQ_BASE_ADDR64(0))));

	dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
		CN23XX_SLI_IQ_SIZE(0),
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0))));

	dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n",
		CN23XX_SLI_CTL_STATUS,
		CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS)));

	for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) {
		dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
			q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
			CVM_CAST64(octeon_read_csr64
				   (oct,
				    CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
	}

	/* PCIe Device Control/Status from config space */
	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
	dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
		CN23XX_CONFIG_PCIE_DEVCTL, regval);

	dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n",
		oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
		CVM_CAST64(lio_pci_readq(
			oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))));

	dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n",
		oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port),
		CVM_CAST64(octeon_read_csr64(
			oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
}