/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>

/*
 * virtio related functions
 */

struct vq_config_block {
	__u16 index;
	__u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct virtio_ccw_device {
	struct virtio_device vdev;
	__u8 *status;
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
	struct ccw_device *cdev;
	__u32 curr_io;
	int err;
	wait_queue_head_t wait_q;
	spinlock_t lock;
	struct list_head virtqueues;
	unsigned long indicators;
	unsigned long indicators2;
	struct vq_config_block *config_block;
	bool is_thinint;
	void *airq_info;
};

struct vq_info_block {
	__u64 queue;
	__u32 align;
	__u16 index;
	__u16 num;
} __packed;

struct virtio_feature_desc {
	__u32 features;
	__u8 index;
} __packed;

struct virtio_thinint_area {
	unsigned long summary_indicator;
	unsigned long indicator;
	u64 bit_nr;
	u8 isc;
} __packed;

struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;
	void *queue;
	struct vq_info_block *info_block;
	int bit_nr;
	struct list_head node;
	long cookie;
};

#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

static int virtio_ccw_use_airq = 1;

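/*
 * One airq_info covers a single adapter interrupt indicator area that may
 * be shared by the virtqueues of several devices: a summary indicator byte
 * plus a bit vector (aiv) whose bits map back to the owning virtqueues.
 */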
struct airq_info {
	rwlock_t lock;
	u8 summary_indicator;
	struct airq_struct airq;
	struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];

#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_SET_IND_ADAPTER 0x73

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_ccw_device, vdev);
}

static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}

static void virtio_airq_handler(struct airq_struct *airq)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	info->summary_indicator = 0;
	smp_wmb();
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}

static struct airq_info *new_airq_info(void)
{
	struct airq_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
	rwlock_init(&info->lock);
	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
	if (!info->aiv) {
		kfree(info);
		return NULL;
	}
	info->airq.handler = virtio_airq_handler;
	info->airq.lsi_ptr = &info->summary_indicator;
	info->airq.lsi_mask = 0xff;
	info->airq.isc = VIRTIO_AIRQ_ISC;
	rc = register_adapter_interrupt(&info->airq);
	if (rc) {
		airq_iv_release(info->aiv);
		kfree(info);
		return NULL;
	}
	return info;
}

static void destroy_airq_info(struct airq_info *info)
{
	if (!info)
		return;

	unregister_adapter_interrupt(&info->airq);
	airq_iv_release(info->aiv);
	kfree(info);
}

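/*
 * Find (or create) an airq_info with room for nvqs consecutive indicator
 * bits, point those bits at the given virtqueues, report the first bit and
 * the owning airq_info back to the caller, and return the address of the
 * indicator bit vector (0 on failure).
 */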
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{
	int i, j;
	struct airq_info *info;
	unsigned long indicator_addr = 0;
	unsigned long bit, flags;

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info();
		info = airq_areas[i];
		if (!info)
			return 0;
		write_lock_irqsave(&info->lock, flags);
		bit = airq_iv_alloc(info->aiv, nvqs);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = (unsigned long)info->aiv->vector;
		for (j = 0; j < nvqs; j++) {
			airq_iv_set_ptr(info->aiv, bit + j,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}

static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
	struct virtio_ccw_vq_info *info;

	list_for_each_entry(info, &vcdev->virtqueues, node)
		drop_airq_indicator(info->vq, vcdev->airq_info);
}

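/*
 * Channel I/O helpers: every channel program is started with an intparm
 * that carries one of the VIRTIO_CCW_DOING_* flags.  ccw_io_helper() sets
 * the flag in curr_io under the ccw device lock and sleeps on wait_q until
 * the interrupt handler clears it again; doing_io() is the wait condition.
 */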
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (vcdev->err)
		ret = 0;
	else
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}

static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	return ret ? ret : vcdev->err;
}

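/*
 * Tell the host to stop using our indicators: either the classic indicator
 * word (CCW_CMD_SET_IND) or, for thinint devices, the adapter indicator
 * area (CCW_CMD_SET_IND_ADAPTER); the airq bits are released afterwards.
 */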
static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
				      struct ccw1 *ccw)
{
	int ret;
	unsigned long *indicatorp = NULL;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *airq_info = vcdev->airq_info;

	if (vcdev->is_thinint) {
		thinint_area = kzalloc(sizeof(*thinint_area),
				       GFP_DMA | GFP_KERNEL);
		if (!thinint_area)
			return;
		thinint_area->summary_indicator =
			(unsigned long) &airq_info->summary_indicator;
		thinint_area->isc = VIRTIO_AIRQ_ISC;
		ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
		ccw->count = sizeof(*thinint_area);
		ccw->cda = (__u32)(unsigned long) thinint_area;
	} else {
		indicatorp = kmalloc(sizeof(&vcdev->indicators),
				     GFP_DMA | GFP_KERNEL);
		if (!indicatorp)
			return;
		*indicatorp = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->count = sizeof(vcdev->indicators);
		ccw->cda = (__u32)(unsigned long) indicatorp;
	}
	/* Deregister indicators from host. */
	vcdev->indicators = 0;
	ccw->flags = 0;
	ret = ccw_io_helper(vcdev, ccw,
			    vcdev->is_thinint ?
			    VIRTIO_CCW_DOING_SET_IND_ADAPTER :
			    VIRTIO_CCW_DOING_SET_IND);
	if (ret && (ret != -ENODEV))
		dev_info(&vcdev->cdev->dev,
			 "Failed to deregister indicators (%d)\n", ret);
	else if (vcdev->is_thinint)
		virtio_ccw_drop_indicators(vcdev);
	kfree(indicatorp);
	kfree(thinint_area);
}

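/*
 * Notify the host that a virtqueue has new buffers.  The notification is a
 * diagnose 0x500 hypercall with function code KVM_S390_VIRTIO_CCW_NOTIFY,
 * passing the subchannel id, the queue index and the cookie returned by
 * the previous notification.
 */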
static inline long do_kvm_notify(struct subchannel_id schid,
				 unsigned long queue_index,
				 long cookie)
{
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register struct subchannel_id __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __rc asm("2");
	register long __cookie asm("4") = cookie;

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
		      "d"(__cookie)
		      : "memory", "cc");
	return __rc;
}

static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
	struct virtio_ccw_vq_info *info = vq->priv;
	struct virtio_ccw_device *vcdev;
	struct subchannel_id schid;

	vcdev = to_vc_device(info->vq->vdev);
	ccw_device_get_schid(vcdev->cdev, &schid);
	info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
	if (info->cookie < 0)
		return false;
	return true;
}

static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
				   struct ccw1 *ccw, int index)
{
	vcdev->config_block->index = index;
	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
	ccw->flags = 0;
	ccw->count = sizeof(struct vq_config_block);
	ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
	return vcdev->config_block->num;
}

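/*
 * Tear down one virtqueue: unlink it from the device list, tell the host
 * to forget it (SET_VQ with a zeroed info block) and free the ring memory.
 */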
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned long size;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	info->info_block->queue = 0;
	info->info_block->align = 0;
	info->info_block->index = index;
	info->info_block->num = 0;
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->count = sizeof(*info->info_block);
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
			 ret, index);

	vring_del_virtqueue(vq);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info->info_block);
	kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	virtio_ccw_drop_indicator(vcdev, ccw);

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	kfree(ccw);
}

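/*
 * Set up one virtqueue: query its size from the host (READ_VQ_CONF),
 * allocate and initialize the ring, then register queue address, alignment
 * and size with the host via CCW_CMD_SET_VQ.
 */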
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	unsigned long size = 0; /* silence the compiler */
	unsigned long flags;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = kzalloc(sizeof(*info->info_block),
				   GFP_DMA | GFP_KERNEL);
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (info->queue == NULL) {
		dev_warn(&vcdev->cdev->dev, "no queue\n");
		err = -ENOMEM;
		goto out_err;
	}

	vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
				 true, info->queue, virtio_ccw_kvm_notify,
				 callback, name);
	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}

	/* Register it with the host. */
	info->info_block->queue = (__u64)info->queue;
	info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
	info->info_block->index = i;
	info->info_block->num = info->num;
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->count = sizeof(*info->info_block);
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		if (info->queue)
			free_pages_exact(info->queue, size);
		kfree(info->info_block);
	}
	kfree(info);
	return ERR_PTR(err);
}

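/*
 * Try to switch the device to adapter (thin) interrupts: allocate indicator
 * bits for all virtqueues and register them with the host via
 * CCW_CMD_SET_IND_ADAPTER.  On -EOPNOTSUPP, adapter interrupts are disabled
 * globally and the caller falls back to classic indicators.
 */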
static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *info;

	thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
						     &thinint_area->bit_nr,
						     &vcdev->airq_info);
	if (!thinint_area->indicator) {
		ret = -ENOSPC;
		goto out;
	}
	info = vcdev->airq_info;
	thinint_area->summary_indicator =
		(unsigned long) &info->summary_indicator;
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ccw->cda = (__u32)(unsigned long)thinint_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	kfree(thinint_area);
	return ret;
}

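/*
 * Set up all requested virtqueues and register the queue and config-change
 * indicators with the host; the indicator pointer is passed in a GFP_DMA
 * allocation because the host needs a data area below 2G.
 */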
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
			       const char *names[])
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
	int ret, i;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
					     ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/* We need a data area under 2G to communicate. */
	indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
	if (!indicatorp)
		goto out;
	*indicatorp = (unsigned long) &vcdev->indicators;
	if (vcdev->is_thinint) {
		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
		if (ret)
			/* no error, just fall back to legacy interrupts */
			vcdev->is_thinint = 0;
	}
	if (!vcdev->is_thinint) {
		/* Register queue indicators with host. */
		vcdev->indicators = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->flags = 0;
		ccw->count = sizeof(vcdev->indicators);
		ccw->cda = (__u32)(unsigned long) indicatorp;
		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
		if (ret)
			goto out;
	}
	/* Register indicators2 with host for config changes */
	*indicatorp = (unsigned long) &vcdev->indicators2;
	vcdev->indicators2 = 0;
	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
	ccw->flags = 0;
	ccw->count = sizeof(vcdev->indicators2);
	ccw->cda = (__u32)(unsigned long) indicatorp;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
	if (ret)
		goto out;

	kfree(indicatorp);
	kfree(ccw);
	return 0;
out:
	kfree(indicatorp);
	kfree(ccw);
	virtio_ccw_del_vqs(vdev);
	return ret;
}

static void virtio_ccw_reset(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Zero status bits. */
	*vcdev->status = 0;

	/* Send a reset ccw on device. */
	ccw->cmd_code = CCW_CMD_VDEV_RESET;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = 0;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
	kfree(ccw);
}

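/*
 * Feature negotiation: the host keeps the feature bits in 32-bit chunks
 * addressed by virtio_feature_desc.index.  Host features are read via
 * CCW_CMD_READ_FEAT (currently only the first 32 bits) and the negotiated
 * guest features are written back chunk by chunk via CCW_CMD_WRITE_FEAT.
 */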
static u32 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret, rc;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return 0;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	/* TODO: Features > 32 bits */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	rc = le32_to_cpu(features->features);

out_free:
	kfree(features);
	kfree(ccw);
	return rc;
}

static void virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int i;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features)
		goto out_free;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
	     i++) {
		int highbits = i % 2 ? 32 : 0;
		features->index = i;
		features->features = cpu_to_le32(vdev->features[i / 2]
						 >> highbits);
		/* Write the feature bits to the host. */
		ccw->cmd_code = CCW_CMD_WRITE_FEAT;
		ccw->flags = 0;
		ccw->count = sizeof(*features);
		ccw->cda = (__u32)(unsigned long)features;
		ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	}
out_free:
	kfree(features);
	kfree(ccw);
}

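/*
 * Config space accessors: the whole config area is transferred to or from
 * the host through a GFP_DMA bounce buffer and mirrored in vcdev->config.
 */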
static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	memcpy(vcdev->config, config_area, sizeof(vcdev->config));
	memcpy(buf, &vcdev->config[offset], len);

out_free:
	kfree(config_area);
	kfree(ccw);
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	kfree(config_area);
	kfree(ccw);
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	return *vcdev->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Write the status to the host. */
	*vcdev->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	ccw->cda = (__u32)(unsigned long)vcdev->status;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	kfree(ccw);
}

static struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
};


/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = container_of(_d, struct virtio_device,
						 dev);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	kfree(vcdev->status);
	kfree(vcdev->config_block);
	kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
	if (scsw_cstat(&irb->scsw) != 0)
		return 1;
	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return 1;
	if (scsw_cc(&irb->scsw) != 0)
		return 1;
	return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
					      int index)
{
	struct virtio_ccw_vq_info *info;
	unsigned long flags;
	struct virtqueue *vq;

	vq = NULL;
	spin_lock_irqsave(&vcdev->lock, flags);
	list_for_each_entry(info, &vcdev->virtqueues, node) {
		if (info->vq->index == index) {
			vq = info->vq;
			break;
		}
	}
	spin_unlock_irqrestore(&vcdev->lock, flags);
	return vq;
}

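/*
 * Interrupt handler for the ccw device: completes pending channel I/O
 * (clearing the VIRTIO_CCW_DOING_* flag and waking up ccw_io_helper()),
 * then services the classic queue indicators and the config-change
 * indicator.
 */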
static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;
	struct virtio_driver *drv;

	if (!vcdev)
		return;
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
				 activity);
			WARN_ON(1);
			break;
		}
	}
	for_each_set_bit(i, &vcdev->indicators,
			 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, &vcdev->indicators);
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
	if (test_bit(0, &vcdev->indicators2)) {
		drv = container_of(vcdev->vdev.dev.driver,
				   struct virtio_driver, driver);

		if (drv && drv->config_changed)
			drv->config_changed(&vcdev->vdev);
		clear_bit(0, &vcdev->indicators2);
	}
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");

static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);
	if (test_bit(id.devno, devs_no_auto[id.ssid]))
		return 0;
	return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;

	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}

static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	vcdev = dev_get_drvdata(&cdev->dev);
	if (!vcdev) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return NULL;
	}
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return vcdev;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev && cdev->online)
		unregister_virtio_device(&vcdev->vdev);
	cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev)
		unregister_virtio_device(&vcdev->vdev);
	return 0;
}

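/*
 * Bring the ccw device online: allocate the virtio_ccw_device with its
 * DMA-capable config block and status byte, wire it up to the ccw device
 * and register it with the virtio core.
 */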
static int virtio_ccw_online(struct ccw_device *cdev)
{
	int ret;
	struct virtio_ccw_device *vcdev;
	unsigned long flags;

	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
	if (!vcdev) {
		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
				      GFP_DMA | GFP_KERNEL);
	if (!vcdev->config_block) {
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
	if (!vcdev->status) {
		ret = -ENOMEM;
		goto out_free;
	}

	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */

	vcdev->vdev.dev.parent = &cdev->dev;
	vcdev->vdev.dev.release = virtio_ccw_release_dev;
	vcdev->vdev.config = &virtio_ccw_config_ops;
	vcdev->cdev = cdev;
	init_waitqueue_head(&vcdev->wait_q);
	INIT_LIST_HEAD(&vcdev->virtqueues);
	spin_lock_init(&vcdev->lock);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, vcdev);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	vcdev->vdev.id.vendor = cdev->id.cu_type;
	vcdev->vdev.id.device = cdev->id.cu_model;
	ret = register_virtio_device(&vcdev->vdev);
	if (ret) {
		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
			 ret);
		goto out_put;
	}
	return 0;
out_put:
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	put_device(&vcdev->vdev.dev);
	return ret;
out_free:
	if (vcdev) {
		kfree(vcdev->status);
		kfree(vcdev->config_block);
	}
	kfree(vcdev);
	return ret;
}

static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
	/* TODO: Check whether we need special handling here. */
	return 0;
}

static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};
MODULE_DEVICE_TABLE(ccw, virtio_ids);

static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "virtio_ccw",
	},
	.ids = virtio_ids,
	.probe = virtio_ccw_probe,
	.remove = virtio_ccw_remove,
	.set_offline = virtio_ccw_offline,
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
};

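/*
 * Helpers for parsing the no_auto= module parameter: pure_hex() consumes
 * a bounded hex number, parse_busid() splits a cssid.ssid.devno bus id,
 * and no_auto_parse() marks the given bus id ranges in devs_no_auto[].
 */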
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
			   int max_digit, int max_val)
{
	int diff;

	diff = 0;
	*val = 0;

	while (diff <= max_digit) {
		int value = hex_to_bin(**cp);

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		diff++;
	}

	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
		return 1;

	return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
			      unsigned int *ssid, unsigned int *devno)
{
	char *str_work;
	int rc, ret;

	rc = 1;

	if (*str == '\0')
		goto out;

	str_work = str;
	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
	if (ret || (str_work[0] != '\0'))
		goto out;

	rc = 0;
out:
	return rc;
}

static void __init no_auto_parse(void)
{
	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
	char *parm, *str;
	int rc;

	str = no_auto;
	while ((parm = strsep(&str, ","))) {
		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
				 &from_ssid, &from);
		if (rc)
			continue;
		if (parm != NULL) {
			rc = parse_busid(parm, &to_cssid,
					 &to_ssid, &to);
			if ((from_ssid > to_ssid) ||
			    ((from_ssid == to_ssid) && (from > to)))
				rc = -EINVAL;
		} else {
			to_cssid = from_cssid;
			to_ssid = from_ssid;
			to = from;
		}
		if (rc)
			continue;
		while ((from_ssid < to_ssid) ||
		       ((from_ssid == to_ssid) && (from <= to))) {
			set_bit(from, devs_no_auto[from_ssid]);
			from++;
			if (from > __MAX_SUBCHANNEL) {
				from_ssid++;
				from = 0;
			}
		}
	}
}

static int __init virtio_ccw_init(void)
{
	/* parse no_auto string before we do anything further */
	no_auto_parse();
	return ccw_driver_register(&virtio_ccw_driver);
}
module_init(virtio_ccw_init);

static void __exit virtio_ccw_exit(void)
{
	int i;

	ccw_driver_unregister(&virtio_ccw_driver);
	for (i = 0; i < MAX_AIRQ_AREAS; i++)
		destroy_airq_info(airq_areas[i]);
}
module_exit(virtio_ccw_exit);