/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "vfio_ccw_private.h"

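/*
 * Reset the mediated device: quiesce the subchannel to stop anything in
 * flight, then reenable it so it is ready for new requests.
 */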
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private;
        struct subchannel *sch;
        int ret;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        sch = private->sch;
        /*
         * TODO:
         * At the current stage, some things like "no I/O running" and "no
         * interrupt pending" are clear, but we are not sure what other state
         * we need to care about.
         * There are still many more instructions that need to be handled. We
         * should come back here later.
         */
        ret = vfio_ccw_sch_quiesce(sch);
        if (ret)
                return ret;

        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        if (!ret)
                private->state = VFIO_CCW_STATE_IDLE;

        return ret;
}

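/*
 * IOMMU notifier callback: on a DMA unmap that hits an IOVA pinned by the
 * current channel program, reset the device and free the channel program
 * so that no stale pinnings remain.
 */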
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
                                  unsigned long action,
                                  void *data)
{
        struct vfio_ccw_private *private =
                container_of(nb, struct vfio_ccw_private, nb);

        /*
         * Vendor drivers MUST unpin pages in response to an
         * invalidation.
         */
        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
                struct vfio_iommu_type1_dma_unmap *unmap = data;

                if (!cp_iova_pinned(&private->cp, unmap->iova))
                        return NOTIFY_OK;

                if (vfio_ccw_mdev_reset(private->mdev))
                        return NOTIFY_BAD;

                cp_free(&private->cp);
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

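/* sysfs attributes describing the supported mdev type ("io"). */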
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
        return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                               char *buf)
{
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
                                        struct device *dev, char *buf)
{
        struct vfio_ccw_private *private = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
        &mdev_type_attr_name.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_available_instances.attr,
        NULL,
};

static struct attribute_group mdev_type_group = {
        .name  = "io",
        .attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
        &mdev_type_group,
        NULL,
};

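/*
 * Creating the mediated device claims the available instance; removing it
 * makes the instance available again.
 */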
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        if (private->state == VFIO_CCW_STATE_NOT_OPER)
                return -ENODEV;

        if (atomic_dec_if_positive(&private->avail) < 0)
                return -EPERM;

        private->mdev = mdev;
        private->state = VFIO_CCW_STATE_IDLE;

        return 0;
}

static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
            (private->state != VFIO_CCW_STATE_STANDBY)) {
                if (!vfio_ccw_mdev_reset(mdev))
                        private->state = VFIO_CCW_STATE_STANDBY;
                /* The state will be NOT_OPER on error. */
        }

        private->mdev = NULL;
        atomic_inc(&private->avail);

        return 0;
}

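/*
 * open/release pair: register (and later unregister) the IOMMU notifier
 * that reports DMA unmaps while the device is in use.
 */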
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));
        unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

        private->nb.notifier_call = vfio_ccw_mdev_notifier;

        return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                      &events, &private->nb);
}

static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                 &private->nb);
}

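/*
 * Userspace accesses the I/O region by reading and writing the device fd.
 * A write hands the request to the state machine via the IO_REQ event;
 * writes are only accepted while the device is idle.
 */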
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
                                  char __user *buf,
                                  size_t count,
                                  loff_t *ppos)
{
        struct vfio_ccw_private *private;
        struct ccw_io_region *region;

        if (*ppos + count > sizeof(*region))
                return -EINVAL;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        region = &private->io_region;
        if (copy_to_user(buf, (void *)region + *ppos, count))
                return -EFAULT;

        return count;
}

static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
                                   const char __user *buf,
                                   size_t count,
                                   loff_t *ppos)
{
        struct vfio_ccw_private *private;
        struct ccw_io_region *region;

        if (*ppos + count > sizeof(*region))
                return -EINVAL;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        if (private->state != VFIO_CCW_STATE_IDLE)
                return -EACCES;

        region = &private->io_region;
        if (copy_from_user((void *)region + *ppos, buf, count))
                return -EFAULT;

        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
        if (region->ret_code != 0) {
                private->state = VFIO_CCW_STATE_IDLE;
                return region->ret_code;
        }

        return count;
}

static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info)
{
        info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
        info->num_regions = VFIO_CCW_NUM_REGIONS;
        info->num_irqs = VFIO_CCW_NUM_IRQS;

        return 0;
}

static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
                                         u16 *cap_type_id,
                                         void **cap_type)
{
        switch (info->index) {
        case VFIO_CCW_CONFIG_REGION_INDEX:
                info->offset = 0;
                info->size = sizeof(struct ccw_io_region);
                info->flags = VFIO_REGION_INFO_FLAG_READ
                              | VFIO_REGION_INFO_FLAG_WRITE;
                return 0;
        default:
                return -EINVAL;
        }
}

static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
        if (info->index != VFIO_CCW_IO_IRQ_INDEX)
                return -EINVAL;

        info->count = 1;
        info->flags = VFIO_IRQ_INFO_EVENTFD;

        return 0;
}

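/*
 * Wire up (or tear down) the eventfd that signals I/O completion to
 * userspace. NONE and BOOL data simply trigger the existing eventfd;
 * EVENTFD data installs a new context or, for fd == -1, removes it.
 */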
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
                                  uint32_t flags,
                                  void __user *data)
{
        struct vfio_ccw_private *private;
        struct eventfd_ctx **ctx;

        if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
                return -EINVAL;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        ctx = &private->io_trigger;

        switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
        case VFIO_IRQ_SET_DATA_NONE:
        {
                if (*ctx)
                        eventfd_signal(*ctx, 1);
                return 0;
        }
        case VFIO_IRQ_SET_DATA_BOOL:
        {
                uint8_t trigger;

                if (get_user(trigger, (uint8_t __user *)data))
                        return -EFAULT;

                if (trigger && *ctx)
                        eventfd_signal(*ctx, 1);
                return 0;
        }
        case VFIO_IRQ_SET_DATA_EVENTFD:
        {
                int32_t fd;

                if (get_user(fd, (int32_t __user *)data))
                        return -EFAULT;

                if (fd == -1) {
                        if (*ctx)
                                eventfd_ctx_put(*ctx);
                        *ctx = NULL;
                } else if (fd >= 0) {
                        struct eventfd_ctx *efdctx;

                        efdctx = eventfd_ctx_fdget(fd);
                        if (IS_ERR(efdctx))
                                return PTR_ERR(efdctx);

                        if (*ctx)
                                eventfd_ctx_put(*ctx);

                        *ctx = efdctx;
                } else
                        return -EINVAL;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

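/*
 * Dispatch the standard vfio device ioctls: info queries, IRQ setup and
 * device reset. Each query handler validates argsz against the minimal
 * size of the structure it copies in.
 */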
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
                                   unsigned int cmd,
                                   unsigned long arg)
{
        int ret = 0;
        unsigned long minsz;

        switch (cmd) {
        case VFIO_DEVICE_GET_INFO:
        {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_device_info(&info);
                if (ret)
                        return ret;

                return copy_to_user((void __user *)arg, &info, minsz);
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
                struct vfio_region_info info;
                u16 cap_type_id = 0;
                void *cap_type = NULL;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id,
                                                    &cap_type);
                if (ret)
                        return ret;

                return copy_to_user((void __user *)arg, &info, minsz);
        }
        case VFIO_DEVICE_GET_IRQ_INFO:
        {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_irq_info(&info);
                if (ret)
                        return ret;

                if (info.count == -1)
                        return -EINVAL;

                return copy_to_user((void __user *)arg, &info, minsz);
        }
        case VFIO_DEVICE_SET_IRQS:
        {
                struct vfio_irq_set hdr;
                size_t data_size;
                void __user *data;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
                                                         VFIO_CCW_NUM_IRQS,
                                                         &data_size);
                if (ret)
                        return ret;

                data = (void __user *)(arg + minsz);
                return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
        }
        case VFIO_DEVICE_RESET:
                return vfio_ccw_mdev_reset(mdev);
        default:
                return -ENOTTY;
        }
}

static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
        .owner                  = THIS_MODULE,
        .supported_type_groups  = mdev_type_groups,
        .create                 = vfio_ccw_mdev_create,
        .remove                 = vfio_ccw_mdev_remove,
        .open                   = vfio_ccw_mdev_open,
        .release                = vfio_ccw_mdev_release,
        .read                   = vfio_ccw_mdev_read,
        .write                  = vfio_ccw_mdev_write,
        .ioctl                  = vfio_ccw_mdev_ioctl,
};

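/* Entry points used by the subchannel driver to (un)register with mdev. */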
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
        return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
        mdev_unregister_device(&sch->dev);
}