/*
 * Character device driver for extended error reporting.
 *
 * Copyright (C) 2005 IBM Corporation
 * extended error reporting for DASD ECKD devices
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"

/*
 * SECTION: the internal buffer
 */

/*
 * The internal buffer is meant to store opaque blobs of data, so it does
 * not know about higher level concepts like triggers.
 * It consists of a number of pages that are used as a ringbuffer. Each data
 * blob is stored in a simple record that consists of an integer, which
 * contains the size of the following data, and the data bytes themselves.
 *
 * To allow for multiple independent readers we create one internal buffer
 * each time the device is opened and destroy the buffer when the file is
 * closed again. The number of pages used for this buffer is determined by
 * the module parameter eer_pages.
 *
 * One record can be written to a buffer by using the functions
 * - dasd_eer_start_record (one time per record to write the size to the
 *                          buffer and reserve the space for the data)
 * - dasd_eer_write_buffer (one or more times per record to write the data)
 * The data can be written in several steps, but you have to compute the
 * total size up front for the invocation of dasd_eer_start_record.
 * If the ringbuffer is full, dasd_eer_start_record will remove the required
 * number of old records.
 *
 * A record is typically read in two steps: first read the integer that
 * specifies the size of the following data, then read the data.
 * Both can be done by
 * - dasd_eer_read_buffer
 *
 * For all mentioned functions you need to take the bufferlock first and keep
 * it until a complete record is written or read.
 *
 * All information necessary to keep track of an internal buffer is kept in
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
 * the private_data field of that file. To be able to write data to all
 * existing buffers, each buffer is also added to the bufferlist.
 * If the user does not read a complete record in one go, we have to keep
 * track of the rest of the record. residual stores the number of bytes that
 * are still to be delivered. If the rest of the record is invalidated between
 * two reads, then residual is set to -1 so that the next read will fail.
 * All entries in the eerbuffer structure are protected with the bufferlock.
 * To avoid races between writing to a buffer on the one side and creating
 * and destroying buffers on the other side, the bufferlock must also be used
 * to protect the bufferlist.
 */
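
/*
 * Illustrative sketch (not a function of this driver): writing one record
 * of "count" bytes to every registered buffer follows the protocol above,
 * with bufferlock held across the whole record:
 *
 *      spin_lock_irqsave(&bufferlock, flags);
 *      list_for_each_entry(eerb, &bufferlist, list) {
 *              if (!dasd_eer_start_record(eerb, count))
 *                      dasd_eer_write_buffer(eerb, data, count);
 *      }
 *      spin_unlock_irqrestore(&bufferlock, flags);
 *      wake_up_interruptible(&dasd_eer_read_wait_queue);
 */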

static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);

struct eerbuffer {
        struct list_head list;
        char **buffer;
        int buffersize;
        int buffer_page_count;
        int head;
        int tail;
        int residual;
};

static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);

/*
 * How many free bytes are available in the buffer.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
        if (eerb->head < eerb->tail)
                return eerb->tail - eerb->head - 1;
        return eerb->buffersize - eerb->head + eerb->tail - 1;
}

/*
 * How many bytes of buffer space are used.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
        if (eerb->head >= eerb->tail)
                return eerb->head - eerb->tail;
        return eerb->buffersize - eerb->tail + eerb->head;
}

/*
 * The dasd_eer_write_buffer function just copies count bytes of data
 * to the buffer. Call dasd_eer_start_record first to make sure that
 * enough free space is available.
 * Needs to be called with bufferlock held.
 */
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
                                  char *data, int count)
{
        unsigned long headindex, localhead;
        unsigned long rest, len;
        char *nextdata;

        nextdata = data;
        rest = count;
        while (rest > 0) {
                headindex = eerb->head / PAGE_SIZE;
                localhead = eerb->head % PAGE_SIZE;
                len = min(rest, PAGE_SIZE - localhead);
                memcpy(eerb->buffer[headindex] + localhead, nextdata, len);
                nextdata += len;
                rest -= len;
                eerb->head += len;
                if (eerb->head == eerb->buffersize)
                        eerb->head = 0; /* wrap around */
                BUG_ON(eerb->head > eerb->buffersize);
        }
}

/*
 * Needs to be called with bufferlock held.
 */
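/* Returns the number of bytes actually copied (may be less than count). */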
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
        unsigned long tailindex, localtail;
        unsigned long rest, len, finalcount;
        char *nextdata;

        finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
        nextdata = data;
        rest = finalcount;
        while (rest > 0) {
                tailindex = eerb->tail / PAGE_SIZE;
                localtail = eerb->tail % PAGE_SIZE;
                len = min(rest, PAGE_SIZE - localtail);
                memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
                nextdata += len;
                rest -= len;
                eerb->tail += len;
                if (eerb->tail == eerb->buffersize)
                        eerb->tail = 0; /* wrap around */
                BUG_ON(eerb->tail > eerb->buffersize);
        }
        return finalcount;
}

/*
 * Whenever you want to write a blob of data to the internal buffer, you
 * have to start with this function. It writes the number of bytes that
 * will follow to the buffer and, if necessary, removes old records to
 * make room for the new one.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
        int tailcount;

        if (count + sizeof(count) > eerb->buffersize)
                return -ENOMEM;
        while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
                if (eerb->residual > 0) {
                        eerb->tail += eerb->residual;
                        if (eerb->tail >= eerb->buffersize)
                                eerb->tail -= eerb->buffersize;
                        eerb->residual = -1;
                }
                dasd_eer_read_buffer(eerb, (char *) &tailcount,
                                     sizeof(tailcount));
                eerb->tail += tailcount;
                if (eerb->tail >= eerb->buffersize)
                        eerb->tail -= eerb->buffersize;
        }
        dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));

        return 0;
}

/*
 * Release pages that are not used anymore.
 */
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
        int i;

        for (i = 0; i < no_pages; i++)
                free_page((unsigned long) buf[i]);
}

/*
 * Allocate a new set of memory pages.
 */
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
        int i;

        for (i = 0; i < no_pages; i++) {
                buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
                if (!buf[i]) {
                        dasd_eer_free_buffer_pages(buf, i);
                        return -ENOMEM;
                }
        }
        return 0;
}

/*
 * SECTION: the extended error reporting functionality
 */

/*
 * When a DASD device driver wants to report an error, it calls the
 * function dasd_eer_write and gives the respective trigger ID as
 * parameter. Currently there are four kinds of triggers:
 *
 * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
 * DASD_EER_PPRCSUSPEND: PPRC was suspended
 * DASD_EER_NOPATH:      there is no path to the device left
 * DASD_EER_STATECHANGE: the state of the device has changed
 *
 * For the first three triggers all required information can be supplied by
 * the caller. For these triggers a record is written by the function
 * dasd_eer_write_standard_trigger.
 *
 * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
 * status ccw needs to be executed to gather the necessary sense data first.
 * The dasd_eer_snss function will queue the SNSS request and the request
 * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
 * trigger.
 *
 * To avoid memory allocations at runtime, the necessary memory is allocated
 * when extended error reporting is enabled for a device (by
 * dasd_eer_enable). There is one sense subsystem status request for each
 * eer enabled DASD device. The presence of the cqr in device->eer_cqr
 * indicates that eer is enabled for the device. The use of the snss request
 * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
 * that the cqr is currently in use, dasd_eer_snss cannot start a second
 * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
 * the SNSS request will check the bit and call dasd_eer_snss again.
 */

#define SNSS_DATA_SIZE 44

#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
        __u32 total_size;
        __u32 trigger;
        __u64 tv_sec;
        __u64 tv_usec;
        char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));

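/*
 * Sketch of one record as it is stored in the internal buffer (derived
 * from the write functions below; the leading length integer is consumed
 * by dasd_eer_read and never returned to user space):
 *
 *      int                     length of the rest of the record
 *      struct dasd_eer_header  total_size, trigger, timestamp, busid
 *      sense data              n * 32 bytes for the standard triggers (one
 *                              set per request in the chain with valid
 *                              sense data), or 0 / SNSS_DATA_SIZE bytes
 *                              for DASD_EER_STATECHANGE
 *      "EOR"                   4 bytes, including the trailing '\0'
 */
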
/*
 * The following function can be used for those triggers that have
 * all necessary data available when the function is called.
 * If the parameter cqr is not NULL, the chain of requests will be searched
 * for valid sense data, and all valid sense data sets will be added to
 * the trigger's data.
 */
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
                                            struct dasd_ccw_req *cqr,
                                            int trigger)
{
        struct dasd_ccw_req *temp_cqr;
        int data_size;
        struct timeval tv;
        struct dasd_eer_header header;
        unsigned long flags;
        struct eerbuffer *eerb;

        /* go through cqr chain and count the valid sense data sets */
        data_size = 0;
        for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
                if (temp_cqr->irb.esw.esw0.erw.cons)
                        data_size += 32;

        header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
        header.trigger = trigger;
        do_gettimeofday(&tv);
        header.tv_sec = tv.tv_sec;
        header.tv_usec = tv.tv_usec;
        strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);

        spin_lock_irqsave(&bufferlock, flags);
        list_for_each_entry(eerb, &bufferlist, list) {
                dasd_eer_start_record(eerb, header.total_size);
                dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
                for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
                        if (temp_cqr->irb.esw.esw0.erw.cons)
                                dasd_eer_write_buffer(eerb, temp_cqr->irb.ecw, 32);
                dasd_eer_write_buffer(eerb, "EOR", 4);
        }
        spin_unlock_irqrestore(&bufferlock, flags);
        wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function writes a DASD_EER_STATECHANGE trigger.
 */
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
                                        struct dasd_ccw_req *cqr,
                                        int trigger)
{
        int data_size;
        int snss_rc;
        struct timeval tv;
        struct dasd_eer_header header;
        unsigned long flags;
        struct eerbuffer *eerb;

        snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
        if (snss_rc)
                data_size = 0;
        else
                data_size = SNSS_DATA_SIZE;

        header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
        header.trigger = DASD_EER_STATECHANGE;
        do_gettimeofday(&tv);
        header.tv_sec = tv.tv_sec;
        header.tv_usec = tv.tv_usec;
        strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);

        spin_lock_irqsave(&bufferlock, flags);
        list_for_each_entry(eerb, &bufferlist, list) {
                dasd_eer_start_record(eerb, header.total_size);
                dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
                if (!snss_rc)
                        dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
                dasd_eer_write_buffer(eerb, "EOR", 4);
        }
        spin_unlock_irqrestore(&bufferlock, flags);
        wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function is called for all triggers. It calls the appropriate
 * function that writes the actual trigger records.
 */
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
                    unsigned int id)
{
        if (!device->eer_cqr)
                return;
        switch (id) {
        case DASD_EER_FATALERROR:
        case DASD_EER_PPRCSUSPEND:
                dasd_eer_write_standard_trigger(device, cqr, id);
                break;
        case DASD_EER_NOPATH:
                dasd_eer_write_standard_trigger(device, NULL, id);
                break;
        case DASD_EER_STATECHANGE:
                dasd_eer_write_snss_trigger(device, cqr, id);
                break;
        default: /* unknown trigger, so we write it without any sense data */
                dasd_eer_write_standard_trigger(device, NULL, id);
                break;
        }
}
EXPORT_SYMBOL(dasd_eer_write);

/*
 * Start a sense subsystem status request.
 * Needs to be called with the device held.
 */
void dasd_eer_snss(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        cqr = device->eer_cqr;
        if (!cqr) /* Device not eer enabled. */
                return;
        if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
                /* Sense subsystem status request in use. */
                set_bit(DASD_FLAG_EER_SNSS, &device->flags);
                return;
        }
        /* cdev is already locked, can't use dasd_add_request_head */
        clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add(&cqr->devlist, &device->ccw_queue);
        dasd_schedule_device_bh(device);
}

/*
 * Callback function for use with sense subsystem status request.
 */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
        struct dasd_device *device = cqr->startdev;
        unsigned long flags;

        dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        if (device->eer_cqr == cqr) {
                clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
                if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
                        /* Another SNSS has been requested in the meantime. */
                        dasd_eer_snss(device);
                cqr = NULL;
        }
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr)
                /*
                 * Extended error reporting has been switched off while
                 * the SNSS request was running. It could even have
                 * been switched off and on again, in which case there
                 * is a new ccw in device->eer_cqr. Free the "old"
                 * snss request now.
                 */
                dasd_kfree_request(cqr, device);
}

/*
 * Enable error reporting on a given device.
 */
int dasd_eer_enable(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        unsigned long flags;

        if (device->eer_cqr)
                return 0;

        if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
                return -EPERM; /* FIXME: -EMEDIUMTYPE ? */

        cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */,
                                   SNSS_DATA_SIZE, device);
        if (!cqr)
                return -ENOMEM;

        cqr->startdev = device;
        cqr->retries = 255;
        cqr->expires = 10 * HZ;
        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);

        cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
        cqr->cpaddr->count = SNSS_DATA_SIZE;
        cqr->cpaddr->flags = 0;
        cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;

        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;
        cqr->callback = dasd_eer_snss_cb;

        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        if (!device->eer_cqr) {
                device->eer_cqr = cqr;
                cqr = NULL;
        }
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr)
                dasd_kfree_request(cqr, device);
        return 0;
}

/*
 * Disable error reporting on a given device.
 */
void dasd_eer_disable(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        unsigned long flags;
        int in_use;

        if (!device->eer_cqr)
                return;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr = device->eer_cqr;
        device->eer_cqr = NULL;
        clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
        in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr && !in_use)
                dasd_kfree_request(cqr, device);
}

/*
 * SECTION: the device operations
 */

/*
 * On the one hand we need a lock to access our internal buffer, on the
 * other hand copy_to_user can sleep. So we copy the data we have to
 * transfer into an intermediate readbuffer, which is protected by the
 * readbuffer_mutex.
 */
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);

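/*
 * Open the character device: allocate a fresh, empty eerbuffer for this
 * file descriptor and add it to the bufferlist, so that records written
 * from now on also end up in this buffer.
 */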
static int dasd_eer_open(struct inode *inp, struct file *filp)
{
        struct eerbuffer *eerb;
        unsigned long flags;

        eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
        if (!eerb)
                return -ENOMEM;
        lock_kernel();
        eerb->buffer_page_count = eer_pages;
        if (eerb->buffer_page_count < 1 ||
            eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
                kfree(eerb);
                MESSAGE(KERN_WARNING, "can't open device since module "
                        "parameter eer_pages is smaller than 1 or"
                        " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
                unlock_kernel();
                return -EINVAL;
        }
        eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
        eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
                               GFP_KERNEL);
        if (!eerb->buffer) {
                kfree(eerb);
                unlock_kernel();
                return -ENOMEM;
        }
        if (dasd_eer_allocate_buffer_pages(eerb->buffer,
                                           eerb->buffer_page_count)) {
                kfree(eerb->buffer);
                kfree(eerb);
                unlock_kernel();
                return -ENOMEM;
        }
        filp->private_data = eerb;
        spin_lock_irqsave(&bufferlock, flags);
        list_add(&eerb->list, &bufferlist);
        spin_unlock_irqrestore(&bufferlock, flags);

        unlock_kernel();
        return nonseekable_open(inp, filp);
}

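/*
 * Release the character device: remove this reader's eerbuffer from the
 * bufferlist and free its pages; records not yet read are discarded.
 */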
static int dasd_eer_close(struct inode *inp, struct file *filp)
{
        struct eerbuffer *eerb;
        unsigned long flags;

        eerb = (struct eerbuffer *) filp->private_data;
        spin_lock_irqsave(&bufferlock, flags);
        list_del(&eerb->list);
        spin_unlock_irqrestore(&bufferlock, flags);
        dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
        kfree(eerb->buffer);
        kfree(eerb);

        return 0;
}

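/*
 * Read at most one record (or the remainder of a partially read record)
 * from this reader's eerbuffer. Unless the file was opened with O_NONBLOCK,
 * the call blocks until data is available. The static readbuffer is used as
 * an intermediate copy so that copy_to_user is not called under bufferlock.
 */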
static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
                             size_t count, loff_t *ppos)
{
        int tc, rc;
        int tailcount, effective_count;
        unsigned long flags;
        struct eerbuffer *eerb;

        eerb = (struct eerbuffer *) filp->private_data;
        if (mutex_lock_interruptible(&readbuffer_mutex))
                return -ERESTARTSYS;

        spin_lock_irqsave(&bufferlock, flags);

        if (eerb->residual < 0) { /* the remainder of this record */
                                  /* has been deleted             */
                eerb->residual = 0;
                spin_unlock_irqrestore(&bufferlock, flags);
                mutex_unlock(&readbuffer_mutex);
                return -EIO;
        } else if (eerb->residual > 0) {
                /* OK we still have a second half of a record to deliver */
                effective_count = min(eerb->residual, (int) count);
                eerb->residual -= effective_count;
        } else {
                tc = 0;
                while (!tc) {
                        tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
                                                  sizeof(tailcount));
                        if (!tc) {
                                /* no data available */
                                spin_unlock_irqrestore(&bufferlock, flags);
                                mutex_unlock(&readbuffer_mutex);
                                if (filp->f_flags & O_NONBLOCK)
                                        return -EAGAIN;
                                rc = wait_event_interruptible(
                                        dasd_eer_read_wait_queue,
                                        eerb->head != eerb->tail);
                                if (rc)
                                        return rc;
                                if (mutex_lock_interruptible(&readbuffer_mutex))
                                        return -ERESTARTSYS;
                                spin_lock_irqsave(&bufferlock, flags);
                        }
                }
                WARN_ON(tc != sizeof(tailcount));
                effective_count = min(tailcount, (int) count);
                eerb->residual = tailcount - effective_count;
        }

        tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
        WARN_ON(tc != effective_count);

        spin_unlock_irqrestore(&bufferlock, flags);

        if (copy_to_user(buf, readbuffer, effective_count)) {
                mutex_unlock(&readbuffer_mutex);
                return -EFAULT;
        }

        mutex_unlock(&readbuffer_mutex);
        return effective_count;
}

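/*
 * Poll support: report the device as readable as soon as this reader's
 * eerbuffer contains any data.
 */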
static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
{
        unsigned int mask;
        unsigned long flags;
        struct eerbuffer *eerb;

        eerb = (struct eerbuffer *) filp->private_data;
        poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
        spin_lock_irqsave(&bufferlock, flags);
        if (eerb->head != eerb->tail)
                mask = POLLIN | POLLRDNORM;
        else
                mask = 0;
        spin_unlock_irqrestore(&bufferlock, flags);
        return mask;
}

static const struct file_operations dasd_eer_fops = {
        .open           = &dasd_eer_open,
        .release        = &dasd_eer_close,
        .read           = &dasd_eer_read,
        .poll           = &dasd_eer_poll,
        .owner          = THIS_MODULE,
};

static struct miscdevice *dasd_eer_dev = NULL;

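/*
 * Register the dasd_eer misc character device with a dynamic minor number.
 */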
int __init dasd_eer_init(void)
{
        int rc;

        dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
        if (!dasd_eer_dev)
                return -ENOMEM;

        dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
        dasd_eer_dev->name = "dasd_eer";
        dasd_eer_dev->fops = &dasd_eer_fops;

        rc = misc_register(dasd_eer_dev);
        if (rc) {
                kfree(dasd_eer_dev);
                dasd_eer_dev = NULL;
                MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
                        "register misc device");
                return rc;
        }

        return 0;
}

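/*
 * Deregister the misc device and free the memory allocated in
 * dasd_eer_init.
 */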
void dasd_eer_exit(void)
{
        if (dasd_eer_dev) {
                WARN_ON(misc_deregister(dasd_eer_dev) != 0);
                kfree(dasd_eer_dev);
                dasd_eer_dev = NULL;
        }
}