/*
 * Virtio driver bits
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "s390-ccw.h"
#include "virtio.h"
#include "virtio-scsi.h"

#define VRING_WAIT_REPLY_TIMEOUT 3

static VRing block[VIRTIO_MAX_VQS];
static char ring_area[VIRTIO_RING_SIZE * VIRTIO_MAX_VQS]
                     __attribute__((__aligned__(PAGE_SIZE)));

static char chsc_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

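/* State of the (single) virtio boot device handled by this driver. */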
static VDev vdev = {
    .nr_vqs = 1,
    .vrings = block,
    .cmd_vr_idx = 0,
    .ring_area = ring_area,
    .wait_reply_timeout = VRING_WAIT_REPLY_TIMEOUT,
    .schid = { .one = 1 },
    .scsi_block_size = VIRTIO_SCSI_BLOCK_SIZE,
    .blk_factor = 1,
};

VDev *virtio_get_device(void)
{
    return &vdev;
}

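/*
 * For virtio-ccw, the control unit model reported by SENSE ID carries the
 * virtio device type (e.g. VIRTIO_ID_BLOCK or VIRTIO_ID_SCSI).
 */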
VirtioDevType virtio_get_device_type(void)
{
    return vdev.senseid.cu_model;
}

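/*
 * Issue a KVM hypercall via DIAGNOSE 0x500: the hypercall number is passed
 * in r1, the parameters in r2-r4, and the return value comes back in r2.
 */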
/* virtio spec v1.0 para 4.3.3.2 */
static long kvm_hypercall(unsigned long nr, unsigned long param1,
                          unsigned long param2, unsigned long param3)
{
    register ulong r_nr asm("1") = nr;
    register ulong r_param1 asm("2") = param1;
    register ulong r_param2 asm("3") = param2;
    register ulong r_param3 asm("4") = param3;
    register long retval asm("2");

    asm volatile ("diag 2,4,0x500"
                  : "=d" (retval)
                  : "d" (r_nr), "0" (r_param1), "r"(r_param2), "d"(r_param3)
                  : "memory", "cc");

    return retval;
}

static long virtio_notify(SubChannelId schid, int vq_idx, long cookie)
{
    return kvm_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY, *(u32 *)&schid,
                         vq_idx, cookie);
}

/***********************************************
 *             Virtio functions                *
 ***********************************************/

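/*
 * Collect the interrupt status for the subchannel via TEST SUBCHANNEL and
 * check the IRB: any channel status, or a device status other than
 * channel end + device end (0x0c), is reported as -EIO.
 */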
static int drain_irqs(SubChannelId schid)
{
    Irb irb = {};
    int r = 0;

    while (1) {
        /* FIXME: make use of TPI, for that enable subchannel and isc */
        if (tsch(schid, &irb)) {
            /* Might want to differentiate error codes later on. */
            if (irb.scsw.cstat) {
                r = -EIO;
            } else if (irb.scsw.dstat != 0xc) {
                r = -EIO;
            }
            return r;
        }
    }
}

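/*
 * Run a single channel command: enable the subchannel, build a one-CCW
 * channel program for cmd/ptr/len, start it with SSCH and wait for the I/O
 * to complete by draining the resulting interrupts.
 */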
static int run_ccw(VDev *vdev, int cmd, void *ptr, int len)
{
    Ccw1 ccw = {};
    CmdOrb orb = {};
    Schib schib;
    int r;

    /* start command processing */
    stsch_err(vdev->schid, &schib);
    /* enable the subchannel for the IPL device */
    schib.pmcw.ena = 1;
    msch(vdev->schid, &schib);

    /* start subchannel command */
    orb.fmt = 1;
    orb.cpa = (u32)(long)&ccw;
    orb.lpm = 0x80;

    ccw.cmd_code = cmd;
    ccw.cda = (long)ptr;
    ccw.count = len;

    r = ssch(vdev->schid, &orb);
    /*
     * XXX Wait until the device is done processing the CCW. For now we can
     * assume that a simple tsch will have finished the CCW processing,
     * but the architecture allows for asynchronous operation.
     */
    if (!r) {
        r = drain_irqs(vdev->schid);
    }
    return r;
}

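/*
 * Lay out a (legacy) split virtqueue in the queue memory provided by
 * info->queue: the descriptor table first, the available ring right after
 * it, and the used ring on the next info->align boundary.
 */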
static void vring_init(VRing *vr, VqInfo *info)
{
    void *p = (void *) info->queue;

    debug_print_addr("init p", p);
    vr->id = info->index;
    vr->num = info->num;
    vr->desc = p;
    vr->avail = p + info->num * sizeof(VRingDesc);
    vr->used = (void *)(((unsigned long)&vr->avail->ring[info->num]
               + info->align - 1) & ~(info->align - 1));

    /* Zero out all relevant fields */
    vr->avail->flags = 0;
    vr->avail->idx = 0;

    /* We're running with interrupts off anyway, so don't bother */
    vr->used->flags = VRING_USED_F_NO_NOTIFY;
    vr->used->idx = 0;
    vr->used_idx = 0;
    vr->next_idx = 0;
    vr->cookie = 0;

    debug_print_addr("init vr", vr);
}

static bool vring_notify(VRing *vr)
{
    vr->cookie = virtio_notify(vr->schid, vr->id, vr->cookie);
    return vr->cookie >= 0;
}

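/*
 * Queue one buffer as the next descriptor in the ring. The first buffer of
 * a request is also published in the available ring; follow-up buffers are
 * flagged with VRING_HIDDEN_IS_CHAIN so they only extend the descriptor
 * chain. The available index is only bumped once the last buffer (one
 * without VRING_DESC_F_NEXT) has been queued.
 */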
static void vring_send_buf(VRing *vr, void *p, int len, int flags)
{
    /* For follow-up chains we need to keep the first entry point */
    if (!(flags & VRING_HIDDEN_IS_CHAIN)) {
        vr->avail->ring[vr->avail->idx % vr->num] = vr->next_idx;
    }

    vr->desc[vr->next_idx].addr = (ulong)p;
    vr->desc[vr->next_idx].len = len;
    vr->desc[vr->next_idx].flags = flags & ~VRING_HIDDEN_IS_CHAIN;
    vr->desc[vr->next_idx].next = vr->next_idx + 1;
    vr->next_idx++;

    /* Chains only have a single ID */
    if (!(flags & VRING_DESC_F_NEXT)) {
        vr->avail->idx++;
    }
}

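/*
 * STORE CLOCK: bit 51 of the TOD clock ticks once per microsecond, so
 * shifting the value right by 12 yields microseconds since the TOD epoch.
 */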
static u64 get_clock(void)
{
    u64 r;

    asm volatile("stck %0" : "=Q" (r) : : "cc");
    return r;
}

ulong get_second(void)
{
    return (get_clock() >> 12) / 1000000;
}

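/*
 * Check whether the host has advanced the used ring. If not, kick the
 * queue and yield; if it has, reset the ring bookkeeping for the next
 * request.
 */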
static int vr_poll(VRing *vr)
{
    if (vr->used->idx == vr->used_idx) {
        vring_notify(vr);
        yield();
        return 0;
    }

    vr->used_idx = vr->used->idx;
    vr->next_idx = 0;
    vr->desc[0].len = 0;
    vr->desc[0].flags = 0;
    return 1; /* vr has been updated */
}

/*
 * Wait for the host to reply.
 *
 * vdev.wait_reply_timeout is in seconds; 0 means wait forever.
 *
 * Returns 0 on success, 1 on timeout.
 */
static int vring_wait_reply(void)
{
    ulong target_second = get_second() + vdev.wait_reply_timeout;

    /* Wait for any queue to be updated by the host */
    do {
        int i, r = 0;

        for (i = 0; i < vdev.nr_vqs; i++) {
            r += vr_poll(&vdev.vrings[i]);
        }
        yield();
        if (r) {
            return 0;
        }
    } while (!vdev.wait_reply_timeout || (get_second() < target_second));

    return 1;
}

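/*
 * Submit a command (an array of buffers chained via VRING_DESC_F_NEXT) on
 * the given queue, wait for the host's reply and drain the interrupt.
 * Returns 0 on success, -1 on I/O error.
 */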
int virtio_run(VDev *vdev, int vqid, VirtioCmd *cmd)
{
    VRing *vr = &vdev->vrings[vqid];
    int i = 0;

    do {
        vring_send_buf(vr, cmd[i].data, cmd[i].size,
                       cmd[i].flags | (i ? VRING_HIDDEN_IS_CHAIN : 0));
    } while (cmd[i++].flags & VRING_DESC_F_NEXT);

    vring_wait_reply();
    if (drain_irqs(vr->schid)) {
        return -1;
    }
    return 0;
}

/***********************************************
 *               Virtio block                  *
 ***********************************************/

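/*
 * Read sec_num sectors starting at sector into load_addr, using the
 * standard three-descriptor virtio-blk request layout: the read-only
 * request header, the guest buffer to be written by the host, and a
 * one-byte status field. Returns the status byte (0 on success).
 */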
static int virtio_blk_read_many(VDev *vdev,
                                ulong sector, void *load_addr, int sec_num)
{
    VirtioBlkOuthdr out_hdr;
    u8 status;
    VRing *vr = &vdev->vrings[vdev->cmd_vr_idx];

    /* Tell the host we want to read */
    out_hdr.type = VIRTIO_BLK_T_IN;
    out_hdr.ioprio = 99;
    out_hdr.sector = virtio_sector_adjust(sector);

    vring_send_buf(vr, &out_hdr, sizeof(out_hdr), VRING_DESC_F_NEXT);

    /* This is where we want to receive data */
    vring_send_buf(vr, load_addr, virtio_get_block_size() * sec_num,
                   VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN |
                   VRING_DESC_F_NEXT);

    /* status field */
    vring_send_buf(vr, &status, sizeof(u8),
                   VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN);

    /* Now we can tell the host to read */
    vring_wait_reply();

    if (drain_irqs(vr->schid)) {
        /* Well, whatever status is supposed to contain... */
        status = 1;
    }
    return status;
}

int virtio_read_many(ulong sector, void *load_addr, int sec_num)
{
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        return virtio_blk_read_many(&vdev, sector, load_addr, sec_num);
    case VIRTIO_ID_SCSI:
        return virtio_scsi_read_many(&vdev, sector, load_addr, sec_num);
    }
    panic("\n! No readable IPL device !\n");
    return -1;
}

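/*
 * Load the block run described by a (rec_list1, rec_list2) pair: rec_list1
 * is the starting sector, the upper 16 bits of rec_list2 hold the record
 * length and the following 16 bits the record count minus one. Returns the
 * address just past the loaded data, or -1 if the record length does not
 * match the device block size.
 */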
unsigned long virtio_load_direct(ulong rec_list1, ulong rec_list2,
                                 ulong subchan_id, void *load_addr)
{
    u8 status;
    int sec = rec_list1;
    int sec_num = ((rec_list2 >> 32) & 0xffff) + 1;
    int sec_len = rec_list2 >> 48;
    ulong addr = (ulong)load_addr;

    if (sec_len != virtio_get_block_size()) {
        return -1;
    }

    sclp_print(".");
    status = virtio_read_many(sec, (void *)addr, sec_num);
    if (status) {
        panic("I/O Error");
    }
    addr += sec_num * virtio_get_block_size();

    return addr;
}

int virtio_read(ulong sector, void *load_addr)
{
    return virtio_read_many(sector, load_addr, 1);
}

/*
 * Sectors per track for the ECKD disk layouts we know about, by block size.
 * Other supported value pairs, if any, would need to be added here.
 * Note: the head count is always 15.
 */
static inline u8 virtio_eckd_sectors_for_block_size(int size)
{
    switch (size) {
    case 512:
        return 49;
    case 1024:
        return 33;
    case 2048:
        return 21;
    case 4096:
        return 12;
    }
    return 0;
}

VirtioGDN virtio_guessed_disk_nature(void)
{
    return vdev.guessed_disk_nature;
}

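/*
 * The virtio_assume_*() helpers force a disk layout when the nature of the
 * IPL disk could not be detected: they fill in block size, block factor and
 * (for ECKD) a plausible geometry so that the getters below stay usable.
 */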
void virtio_assume_scsi(void)
{
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        vdev.guessed_disk_nature = VIRTIO_GDN_SCSI;
        vdev.config.blk.blk_size = VIRTIO_SCSI_BLOCK_SIZE;
        vdev.config.blk.physical_block_exp = 0;
        vdev.blk_factor = 1;
        break;
    case VIRTIO_ID_SCSI:
        vdev.scsi_block_size = VIRTIO_SCSI_BLOCK_SIZE;
        break;
    }
}

void virtio_assume_iso9660(void)
{
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        vdev.guessed_disk_nature = VIRTIO_GDN_SCSI;
        vdev.config.blk.blk_size = VIRTIO_ISO_BLOCK_SIZE;
        vdev.config.blk.physical_block_exp = 0;
        vdev.blk_factor = VIRTIO_ISO_BLOCK_SIZE / VIRTIO_SECTOR_SIZE;
        break;
    case VIRTIO_ID_SCSI:
        vdev.scsi_block_size = VIRTIO_ISO_BLOCK_SIZE;
        break;
    }
}

void virtio_assume_eckd(void)
{
    vdev.guessed_disk_nature = VIRTIO_GDN_DASD;
    vdev.blk_factor = 1;
    vdev.config.blk.physical_block_exp = 0;
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        vdev.config.blk.blk_size = 4096;
        break;
    case VIRTIO_ID_SCSI:
        vdev.config.blk.blk_size = vdev.scsi_block_size;
        break;
    }
    vdev.config.blk.geometry.heads = 15;
    vdev.config.blk.geometry.sectors =
        virtio_eckd_sectors_for_block_size(vdev.config.blk.blk_size);
}

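/*
 * Heuristic: a virtio-blk device that reports the classic 255 heads /
 * 63 sectors fake geometry with 512-byte blocks is treated as SCSI-like;
 * virtio-scsi devices always are.
 */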
bool virtio_disk_is_scsi(void)
{
    if (vdev.guessed_disk_nature == VIRTIO_GDN_SCSI) {
        return true;
    }
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        return (vdev.config.blk.geometry.heads == 255)
            && (vdev.config.blk.geometry.sectors == 63)
            && (virtio_get_block_size() == VIRTIO_SCSI_BLOCK_SIZE);
    case VIRTIO_ID_SCSI:
        return true;
    }
    return false;
}

bool virtio_disk_is_eckd(void)
{
    const int block_size = virtio_get_block_size();

    if (vdev.guessed_disk_nature == VIRTIO_GDN_DASD) {
        return true;
    }
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        return (vdev.config.blk.geometry.heads == 15)
            && (vdev.config.blk.geometry.sectors ==
                virtio_eckd_sectors_for_block_size(block_size));
    case VIRTIO_ID_SCSI:
        return false;
    }
    return false;
}

bool virtio_ipl_disk_is_valid(void)
{
    return virtio_disk_is_scsi() || virtio_disk_is_eckd();
}

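/*
 * Block size, geometry and capacity getters. For virtio-scsi devices,
 * heads and sectors fall back to the default 255/63 geometry unless a DASD
 * layout has been assumed, and the block count is derived from
 * vdev.scsi_last_block.
 */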
int virtio_get_block_size(void)
{
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        return vdev.config.blk.blk_size << vdev.config.blk.physical_block_exp;
    case VIRTIO_ID_SCSI:
        return vdev.scsi_block_size;
    }
    return 0;
}

uint8_t virtio_get_heads(void)
{
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        return vdev.config.blk.geometry.heads;
    case VIRTIO_ID_SCSI:
        return vdev.guessed_disk_nature == VIRTIO_GDN_DASD
               ? vdev.config.blk.geometry.heads : 255;
    }
    return 0;
}

uint8_t virtio_get_sectors(void)
{
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        return vdev.config.blk.geometry.sectors;
    case VIRTIO_ID_SCSI:
        return vdev.guessed_disk_nature == VIRTIO_GDN_DASD
               ? vdev.config.blk.geometry.sectors : 63;
    }
    return 0;
}

uint64_t virtio_get_blocks(void)
{
    const uint64_t factor = virtio_get_block_size() / VIRTIO_SECTOR_SIZE;
    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        return vdev.config.blk.capacity / factor;
    case VIRTIO_ID_SCSI:
        return vdev.scsi_last_block / factor;
    }
    return 0;
}

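/*
 * Bring the virtio-ccw device up far enough for I/O: reset it, read its
 * device configuration, then for each queue read the host's queue size,
 * initialize the guest-side ring and hand its location to the host, and
 * finally set the DRIVER_OK status bit.
 */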
static void virtio_setup_ccw(VDev *vdev)
{
    int i, cfg_size = 0;
    unsigned char status = VIRTIO_CONFIG_S_DRIVER_OK;

    IPL_assert(virtio_is_supported(vdev->schid), "PE");
    /* device ID has been established now */

    vdev->config.blk.blk_size = 0; /* mark "illegal" - setup started... */
    vdev->guessed_disk_nature = VIRTIO_GDN_NONE;

    run_ccw(vdev, CCW_CMD_VDEV_RESET, NULL, 0);

    switch (vdev->senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        vdev->nr_vqs = 1;
        vdev->cmd_vr_idx = 0;
        cfg_size = sizeof(vdev->config.blk);
        break;
    case VIRTIO_ID_SCSI:
        vdev->nr_vqs = 3;
        vdev->cmd_vr_idx = VR_REQUEST;
        cfg_size = sizeof(vdev->config.scsi);
        break;
    default:
        panic("Unsupported virtio device\n");
    }
    IPL_assert(run_ccw(vdev, CCW_CMD_READ_CONF, &vdev->config, cfg_size) == 0,
               "Could not get block device configuration");

    /*
     * Skipping CCW_CMD_READ_FEAT. We're not doing anything fancy, and
     * we'll just stop dead anyway if anything does not work as we
     * expect it to.
     */

    for (i = 0; i < vdev->nr_vqs; i++) {
        VqInfo info = {
            .queue = (unsigned long long) ring_area + (i * VIRTIO_RING_SIZE),
            .align = KVM_S390_VIRTIO_RING_ALIGN,
            .index = i,
            .num = 0,
        };
        VqConfig config = {
            .index = i,
            .num = 0,
        };

        IPL_assert(
            run_ccw(vdev, CCW_CMD_READ_VQ_CONF, &config, sizeof(config)) == 0,
            "Could not get block device VQ configuration");
        info.num = config.num;
        vring_init(&vdev->vrings[i], &info);
        vdev->vrings[i].schid = vdev->schid;
        IPL_assert(run_ccw(vdev, CCW_CMD_SET_VQ, &info, sizeof(info)) == 0,
                   "Cannot set VQ info");
    }
    IPL_assert(
        run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status)) == 0,
        "Could not write status to host");
}

void virtio_setup_device(SubChannelId schid)
{
    vdev.schid = schid;
    virtio_setup_ccw(&vdev);

    switch (vdev.senseid.cu_model) {
    case VIRTIO_ID_BLOCK:
        sclp_print("Using virtio-blk.\n");
        if (!virtio_ipl_disk_is_valid()) {
            /*
             * Make sure all getters but the block size return 0 for an
             * invalid IPL disk.
             */
            memset(&vdev.config.blk, 0, sizeof(vdev.config.blk));
            virtio_assume_scsi();
        }
        break;
    case VIRTIO_ID_SCSI:
        IPL_assert(vdev.config.scsi.sense_size == VIRTIO_SCSI_SENSE_SIZE,
                   "Config: sense size mismatch");
        IPL_assert(vdev.config.scsi.cdb_size == VIRTIO_SCSI_CDB_SIZE,
                   "Config: CDB size mismatch");

        sclp_print("Using virtio-scsi.\n");
        virtio_scsi_setup(&vdev);
        break;
    default:
        panic("\n! No IPL device available !\n");
    }
}

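/*
 * Probe the subchannel with SENSE ID: QEMU's virtio-ccw devices report
 * control unit type 0x3832, with the virtio device ID in the control unit
 * model field. Only virtio-blk and virtio-scsi are supported here.
 */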
bool virtio_is_supported(SubChannelId schid)
{
    vdev.schid = schid;
    memset(&vdev.senseid, 0, sizeof(vdev.senseid));
    /* run sense id command */
    if (run_ccw(&vdev, CCW_CMD_SENSE_ID, &vdev.senseid, sizeof(vdev.senseid))) {
        return false;
    }
    if (vdev.senseid.cu_type == 0x3832) {
        switch (vdev.senseid.cu_model) {
        case VIRTIO_ID_BLOCK:
        case VIRTIO_ID_SCSI:
            return true;
        }
    }
    return false;
}

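/*
 * Ask the channel subsystem to enable the multiple subchannel sets (MSS)
 * facility via a CHSC request; a response code of 0x0001 means success.
 */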
int enable_mss_facility(void)
{
    int ret;
    ChscAreaSda *sda_area = (ChscAreaSda *) chsc_page;

    memset(sda_area, 0, PAGE_SIZE);
    sda_area->request.length = 0x0400;
    sda_area->request.code = 0x0031;
    sda_area->operation_code = 0x2;

    ret = chsc(sda_area);
    if ((ret == 0) && (sda_area->response.code == 0x0001)) {
        return 0;
    }
    return -EIO;
}