/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <sys/ioctl.h>
#include <rte_vfio.h>

#include "ifpga_feature_dev.h"

/*
 * Enable the port by clearing the port soft reset bit, which is set by
 * default. The AFU is unable to respond to any MMIO access while in reset.
 * __fpga_port_enable should only be used after __fpga_port_disable.
 */
void __fpga_port_enable(struct ifpga_port_hw *port)
{
	struct feature_port_header *port_hdr;
	struct feature_port_control control;

	WARN_ON(!port->disable_count);

	if (--port->disable_count != 0)
		return;

	port_hdr = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_HEADER);
	WARN_ON(!port_hdr);

	control.csr = readq(&port_hdr->control);
	control.port_sftrst = 0x0;
	writeq(control.csr, &port_hdr->control);
}

int __fpga_port_disable(struct ifpga_port_hw *port)
{
	struct feature_port_header *port_hdr;
	struct feature_port_control control;

	if (port->disable_count++ != 0)
		return 0;

	port_hdr = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_HEADER);
	WARN_ON(!port_hdr);

	/* Set port soft reset */
	control.csr = readq(&port_hdr->control);
	control.port_sftrst = 0x1;
	writeq(control.csr, &port_hdr->control);

	/*
	 * HW sets the ack bit to 1 when all outstanding requests have been
	 * drained on this port and the minimum soft reset pulse width has
	 * elapsed. The driver polls port_soft_reset_ack to determine whether
	 * the reset is done by HW.
	 */
	control.port_sftrst_ack = 1;

	if (fpga_wait_register_field(port_sftrst_ack, control,
				     &port_hdr->control, RST_POLL_TIMEOUT,
				     RST_POLL_INVL)) {
		dev_err(port, "timeout, fail to reset device\n");
		return -ETIMEDOUT;
	}

	return 0;
}
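
/*
 * Illustrative usage (sketch only, not part of the driver): callers bracket
 * work that needs the AFU quiescent with the disable/enable pair, as
 * port_err_clear() below does. disable_count makes nesting safe, so only the
 * outermost pair actually toggles the soft reset bit.
 *
 *	ret = __fpga_port_disable(port);
 *	if (ret)
 *		return ret;
 *	... program registers while the port is held in reset ...
 *	__fpga_port_enable(port);
 */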

int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
{
	struct feature_port_header *port_hdr;
	u64 guidl, guidh;

	if (!uuid)
		return -EINVAL;

	port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);

	spinlock_lock(&port->lock);
	guidl = readq(&port_hdr->afu_header.guid.b[0]);
	guidh = readq(&port_hdr->afu_header.guid.b[8]);
	spinlock_unlock(&port->lock);

	opae_memcpy(uuid->b, &guidl, sizeof(u64));
	opae_memcpy(uuid->b + 8, &guidh, sizeof(u64));

	return 0;
}
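
/*
 * Illustrative caller (sketch only, not part of the driver): reading the AFU
 * GUID into the 16-byte uuid buffer and dumping it as hex. Error handling is
 * elided for brevity.
 *
 *	struct uuid afu_id;
 *	int i;
 *
 *	if (!fpga_get_afu_uuid(port, &afu_id))
 *		for (i = 0; i < 16; i++)
 *			printf("%02x", afu_id.b[i]);
 */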

/* Mask / Unmask Port Errors by the Error Mask register. */
void port_err_mask(struct ifpga_port_hw *port, bool mask)
{
	struct feature_port_error *port_err;
	struct feature_port_err_key err_mask;

	port_err = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_ERROR);

	if (mask)
		err_mask.csr = PORT_ERR_MASK;
	else
		err_mask.csr = 0;

	writeq(err_mask.csr, &port_err->error_mask);
}

/* Clear All Port Errors. */
int port_err_clear(struct ifpga_port_hw *port, u64 err)
{
	struct feature_port_header *port_hdr;
	struct feature_port_error *port_err;
	struct feature_port_err_key mask;
	struct feature_port_first_err_key first;
	struct feature_port_status status;
	int ret = 0;

	port_err = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_ERROR);
	port_hdr = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_HEADER);

	/*
	 * Clear All Port Errors
	 *
	 * - Check for AP6 State
	 * - Halt Port by keeping Port in reset
	 * - Set PORT Error mask to all 1 to mask errors
	 * - Clear all errors
	 * - Set Port mask to all 0 to enable errors
	 * - All errors start capturing new errors
	 * - Enable Port by pulling the port out of reset
	 */

	/* If the device is still in AP6 state, no errors can be cleared. */
	status.csr = readq(&port_hdr->status);
	if (status.power_state == PORT_POWER_STATE_AP6) {
		dev_err(port, "Could not clear errors, device in AP6 state.\n");
		return -EBUSY;
	}

	/* Halt Port by keeping Port in reset */
	ret = __fpga_port_disable(port);
	if (ret)
		return ret;

	/* Mask all errors */
	port_err_mask(port, true);

	/* Clear errors only if the err input matches the current port errors. */
	mask.csr = readq(&port_err->port_error);

	if (mask.csr == err) {
		writeq(mask.csr, &port_err->port_error);

		first.csr = readq(&port_err->port_first_error);
		writeq(first.csr, &port_err->port_first_error);
	} else {
		ret = -EBUSY;
	}

	/* Clear mask */
	port_err_mask(port, false);

	/* Enable the Port by clearing the reset */
	__fpga_port_enable(port);

	return ret;
}

int port_clear_error(struct ifpga_port_hw *port)
{
	struct feature_port_error *port_err;
	struct feature_port_err_key error;

	port_err = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_ERROR);
	error.csr = readq(&port_err->port_error);

	dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);

	return port_err_clear(port, error.csr);
}

static struct feature_driver fme_feature_drvs[] = {
	{FEATURE_DRV(FME_FEATURE_ID_HEADER, FME_FEATURE_HEADER,
			&fme_hdr_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_THERMAL_MGMT, FME_FEATURE_THERMAL_MGMT,
			&fme_thermal_mgmt_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_POWER_MGMT, FME_FEATURE_POWER_MGMT,
			&fme_power_mgmt_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_GLOBAL_ERR, FME_FEATURE_GLOBAL_ERR,
			&fme_global_err_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_PR_MGMT, FME_FEATURE_PR_MGMT,
			&fme_pr_mgmt_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_GLOBAL_DPERF, FME_FEATURE_GLOBAL_DPERF,
			&fme_global_dperf_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_HSSI_ETH, FME_FEATURE_HSSI_ETH,
			&fme_hssi_eth_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_EMIF_MGMT, FME_FEATURE_EMIF_MGMT,
			&fme_emif_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_MAX10_SPI, FME_FEATURE_MAX10_SPI,
			&fme_spi_master_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_NIOS_SPI, FME_FEATURE_NIOS_SPI,
			&fme_nios_spi_master_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_I2C_MASTER, FME_FEATURE_I2C_MASTER,
			&fme_i2c_master_ops),},
	{FEATURE_DRV(FME_FEATURE_ID_ETH_GROUP, FME_FEATURE_ETH_GROUP,
			&fme_eth_group_ops),},
	{0, NULL, NULL}, /* end of array */
};

static struct feature_driver port_feature_drvs[] = {
	{FEATURE_DRV(PORT_FEATURE_ID_HEADER, PORT_FEATURE_HEADER,
			&ifpga_rawdev_port_hdr_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_ERROR, PORT_FEATURE_ERR,
			&ifpga_rawdev_port_error_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_UINT, PORT_FEATURE_UINT,
			&ifpga_rawdev_port_uint_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_STP, PORT_FEATURE_STP,
			&ifpga_rawdev_port_stp_ops)},
	{FEATURE_DRV(PORT_FEATURE_ID_UAFU, PORT_FEATURE_UAFU,
			&ifpga_rawdev_port_afu_ops)},
	{0, NULL, NULL}, /* end of array */
};
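
/*
 * Illustrative only: struct feature_driver carries an id, a name and an ops
 * table (see drv->id/drv->name/drv->ops in the lookup and init helpers
 * below). Supporting another sub-feature would mean appending one more
 * FEATURE_DRV() entry before the {0, NULL, NULL} terminator, e.g.
 * (hypothetical ID, name and ops table):
 *
 *	{FEATURE_DRV(PORT_FEATURE_ID_FOO, PORT_FEATURE_FOO, &port_foo_ops)},
 */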

const char *get_fme_feature_name(unsigned int id)
{
	struct feature_driver *drv = fme_feature_drvs;

	while (drv->name) {
		if (drv->id == id)
			return drv->name;

		drv++;
	}

	return NULL;
}

const char *get_port_feature_name(unsigned int id)
{
	struct feature_driver *drv = port_feature_drvs;

	while (drv->name) {
		if (drv->id == id)
			return drv->name;

		drv++;
	}

	return NULL;
}
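
/*
 * Example (illustrative only, not part of the driver): translating a feature
 * ID discovered during enumeration into a printable name.
 *
 *	const char *name = get_port_feature_name(PORT_FEATURE_ID_UAFU);
 *
 *	if (name)
 *		dev_info(port, "found feature %s\n", name);
 */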

/* Call the uinit hook of every attached feature in the list. */
static void feature_uinit(struct ifpga_feature_list *list)
{
	struct ifpga_feature *feature;

	TAILQ_FOREACH(feature, list, next) {
		if (feature->state != IFPGA_FEATURE_ATTACHED)
			continue;
		if (feature->ops && feature->ops->uinit)
			feature->ops->uinit(feature);
	}
}

/*
 * Walk the driver table and bind ops/name to each attached feature whose ID
 * matches, calling the driver's init hook. On failure, uinit everything that
 * was already initialized.
 */
static int feature_init(struct feature_driver *drv,
			struct ifpga_feature_list *list)
{
	struct ifpga_feature *feature;
	int ret;

	while (drv->ops) {
		TAILQ_FOREACH(feature, list, next) {
			if (feature->state != IFPGA_FEATURE_ATTACHED)
				continue;
			if (feature->id == drv->id) {
				feature->ops = drv->ops;
				feature->name = drv->name;
				if (feature->ops->init) {
					ret = feature->ops->init(feature);
					if (ret)
						goto error;
				}
			}
		}
		drv++;
	}

	return 0;

error:
	feature_uinit(list);
	return ret;
}

int fme_hw_init(struct ifpga_fme_hw *fme)
{
	int ret;

	if (fme->state != IFPGA_FME_IMPLEMENTED)
		return -ENODEV;

	ret = feature_init(fme_feature_drvs, &fme->feature_list);
	if (ret)
		return ret;

	return 0;
}

void fme_hw_uinit(struct ifpga_fme_hw *fme)
{
	feature_uinit(&fme->feature_list);
}

void port_hw_uinit(struct ifpga_port_hw *port)
{
	feature_uinit(&port->feature_list);
}

int port_hw_init(struct ifpga_port_hw *port)
{
	int ret;

	if (port->state == IFPGA_PORT_UNUSED)
		return 0;

	ret = feature_init(port_feature_drvs, &port->feature_list);
	if (ret)
		goto error;

	return 0;

error:
	port_hw_uinit(port);
	return ret;
}

#define FPGA_MAX_MSIX_VEC_COUNT	128
/* irq set buffer length for interrupt */
#define MSIX_IRQ_SET_BUF_LEN	(sizeof(struct vfio_irq_set) + \
				 sizeof(int) * FPGA_MAX_MSIX_VEC_COUNT)

/* Only MSI-X is supported for now */
static int vfio_msix_enable_block(s32 vfio_dev_fd, unsigned int vec_start,
				  unsigned int count, s32 *fds)
{
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = len;
	/* 0 < irq_set->count <= FPGA_MAX_MSIX_VEC_COUNT */
	irq_set->count = count ?
		(count > FPGA_MAX_MSIX_VEC_COUNT ?
		 FPGA_MAX_MSIX_VEC_COUNT : count) : 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = vec_start;

	fd_ptr = (int *)&irq_set->data;
	opae_memcpy(fd_ptr, fds, sizeof(int) * count);

	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		printf("Error enabling MSI-X interrupts\n");

	return ret;
}

int fpga_msix_set_block(struct ifpga_feature *feature, unsigned int start,
			unsigned int count, s32 *fds)
{
	struct feature_irq_ctx *ctx = feature->ctx;
	unsigned int i;
	int ret;

	if (start >= feature->ctx_num || start + count > feature->ctx_num)
		return -EINVAL;

	/* Assume that each feature owns a contiguous block of MSI-X vectors */
	ret = vfio_msix_enable_block(feature->vfio_dev_fd,
				     ctx[start].idx, count, fds);
	if (!ret) {
		for (i = 0; i < count; i++)
			ctx[i].eventfd = fds[i];
	}

	return ret;
}
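
/*
 * Illustrative caller (sketch only, not part of the driver): binding the
 * first two MSI-X vectors of a feature to freshly created eventfds
 * (requires <sys/eventfd.h>; error handling elided for brevity).
 *
 *	s32 fds[2];
 *
 *	fds[0] = eventfd(0, EFD_NONBLOCK);
 *	fds[1] = eventfd(0, EFD_NONBLOCK);
 *	if (fpga_msix_set_block(feature, 0, 2, fds))
 *		printf("failed to bind MSI-X eventfds\n");
 */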