]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/dma/idxd/idxd.h
Linux 5.10-rc1
[mirror_ubuntu-jammy-kernel.git] / drivers / dma / idxd / idxd.h
CommitLineData
bfe1d560
DJ
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3#ifndef _IDXD_H_
4#define _IDXD_H_
5
6#include <linux/sbitmap.h>
8f47d1a5 7#include <linux/dmaengine.h>
bfe1d560
DJ
8#include <linux/percpu-rwsem.h>
9#include <linux/wait.h>
42d279f9 10#include <linux/cdev.h>
bfe1d560
DJ
11#include "registers.h"
12
#define IDXD_DRIVER_VERSION	"1.00"

/* slab cache for struct idxd_desc allocations */
extern struct kmem_cache *idxd_desc_pool;

/* poll limits for register/command waits — units are caller-defined loop counts; TODO confirm */
#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000
19
/* Accelerator flavors this driver recognizes; only DSA is handled here. */
enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_MAX
};
25
#define IDXD_NAME_SIZE 128

/* Driver structure registered on the dsa bus for idxd sub-devices. */
struct idxd_device_driver {
	struct device_driver drv;
};
31
/*
 * Per-MSIX-vector context: identifies the vector (@id) and holds the
 * descriptor lists processed by that vector's interrupt thread.
 */
struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	struct llist_head pending_llist;	/* lock-free intake list */
	struct list_head work_list;		/* descriptors being processed */
};
38
/* A hardware group: the unit that binds engines and wqs together. */
struct idxd_group {
	struct device conf_dev;		/* sysfs configuration device */
	struct idxd_device *idxd;
	struct grpcfg grpcfg;		/* shadow of the hardware GRPCFG */
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;	/* NOTE(review): presumably traffic classes — confirm vs spec */
	int tc_b;
};
52
#define IDXD_MAX_PRIORITY 0xf

/* Software-tracked enable state of a work queue. */
enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};
59
/* Bit numbers for idxd_wq->flags (used with test_bit and friends). */
enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
};
63
/* How a wq is exposed: unused, in-kernel (dmaengine), or user (cdev). */
enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};
69
/* Character-device state embedded in a wq for user-space access. */
struct idxd_cdev {
	struct cdev cdev;
	struct device *dev;
	int minor;
	struct wait_queue_head err_queue;	/* waiters notified of wq errors */
};

#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10
80
d1dfe5b8
DJ
/*
 * Blocking behavior for descriptor allocation — presumably whether
 * idxd_alloc_desc() may sleep for a free slot; confirm against submit.c.
 */
enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};
85
8f47d1a5
DJ
/* How a descriptor completed: normally, or aborted by software. */
enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
};
90
bfe1d560
DJ
/*
 * Software state for one hardware work queue: configuration shadowed in
 * @wqcfg plus the descriptor/completion pools used for submission.
 */
struct idxd_wq {
	void __iomem *dportal;		/* mapped descriptor submission portal */
	struct device conf_dev;		/* sysfs configuration device */
	struct idxd_cdev idxd_cdev;	/* user char-device state */
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;		/* see idxd_wq_get()/idxd_wq_put() */
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;		/* enum idxd_wq_flag bits */
	union wqcfg wqcfg;		/* shadow of the hardware WQCFG */
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;	/* hardware descriptor pool */
	int num_descs;
	struct dsa_completion_record *compls;	/* completion record array */
	dma_addr_t compls_addr;			/* DMA address of @compls */
	int compls_size;
	struct idxd_desc **descs;	/* software descriptor pool */
	struct sbitmap_queue sbq;	/* free-slot allocator for descriptors */
	struct dma_chan dma_chan;	/* dmaengine channel for kernel wqs */
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
};
120
/* A hardware processing engine, assigned to at most one group. */
struct idxd_engine {
	struct device conf_dev;		/* sysfs configuration device */
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};
127
/* shadow registers: capability state read once from MMIO at probe */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
};
137
/* Lifecycle state of the whole device; HALTED is terminal (error). */
enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};
144
/* Bit numbers for idxd_device->flags. */
enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,		/* a device command is in flight */
};
149
/*
 * Software state for one IDXD PCI function: shadow capabilities,
 * register-block offsets, and the group/wq/engine arrays.
 */
struct idxd_device {
	enum idxd_type type;
	struct device conf_dev;		/* sysfs configuration device */
	struct list_head list;		/* linkage on the driver's device list */
	struct idxd_hw hw;		/* shadow of capability registers */
	enum idxd_device_state state;
	unsigned long flags;		/* enum idxd_device_flag bits */
	int id;
	int major;			/* chrdev major number */
	u8 cmd_status;			/* status of the last device command */

	struct pci_dev *pdev;
	void __iomem *reg_base;		/* mapped MMIO register space */

	spinlock_t dev_lock;	/* spinlock for device */
	struct completion *cmd_done;	/* signaled when a command finishes */
	struct idxd_group *groups;
	struct idxd_wq *wqs;
	struct idxd_engine *engines;

	int num_groups;

	/* offsets of register sub-blocks, discovered from hardware */
	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	/* capability limits cached from @hw */
	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */

	union sw_err_reg sw_err;	/* copy of the software error register */
	wait_queue_head_t cmd_waitq;	/* waiters for command-slot availability */
	struct msix_entry *msix_entries;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct dma_device dma_dev;	/* dmaengine provider for this device */
	struct workqueue_struct *wq;	/* workqueue servicing @work */
	struct work_struct work;
};
197
/* IDXD software descriptor: wraps one hardware descriptor + completion. */
struct idxd_desc {
	struct dsa_hw_desc *hw;		/* hardware descriptor */
	dma_addr_t desc_dma;		/* DMA address of @hw */
	struct dsa_completion_record *completion;
	dma_addr_t compl_dma;		/* DMA address of @completion */
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;	/* presumably for irq_entry lists — confirm */
	struct list_head list;
	int id;
	int cpu;		/* cpu the descriptor was allocated on */
	struct idxd_wq *wq;		/* owning work queue */
};
211
/* Resolve a sysfs conf_dev pointer back to its containing object. */
#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

extern struct bus_type dsa_bus_type;
216
bfe1d560
DJ
217static inline bool wq_dedicated(struct idxd_wq *wq)
218{
219 return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
220}
221
42d279f9
DJ
enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

/*
 * Byte offset of a portal within a wq's portal region: the limited
 * portal sits one 0x1000 stride after the unlimited one.
 */
static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	const int portal_stride = 0x1000;

	return prot * portal_stride;
}
231
232static inline int idxd_get_wq_portal_full_offset(int wq_id,
233 enum idxd_portal_prot prot)
234{
235 return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
236}
237
bfe1d560
DJ
238static inline void idxd_set_type(struct idxd_device *idxd)
239{
240 struct pci_dev *pdev = idxd->pdev;
241
242 if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
243 idxd->type = IDXD_TYPE_DSA;
244 else
245 idxd->type = IDXD_TYPE_UNKNOWN;
246}
247
c52ca478
DJ
248static inline void idxd_wq_get(struct idxd_wq *wq)
249{
250 wq->client_count++;
251}
252
253static inline void idxd_wq_put(struct idxd_wq *wq)
254{
255 wq->client_count--;
256}
257
258static inline int idxd_wq_refcount(struct idxd_wq *wq)
259{
260 return wq->client_count;
261};
262
/* sysfs and driver-core registration */
const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
bfe1d560
DJ
271
/* device interrupt control */
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
bfe1d560
DJ
281
/* device control */
void idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
290
/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
bfe1d560 300
d1dfe5b8
DJ
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
305
8f47d1a5
DJ
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
315
42d279f9
DJ
/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
322
bfe1d560 323#endif