/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License. See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom ->submit_bio function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>

#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)					\
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...)						\
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s():" fmt,				\
			  pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)

#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t psd_pool;
static struct bio_set pkt_bio_set;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);

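/*
 * Round a sector down to the start of its packet-aligned zone. The
 * packet size (pd->settings.size, in 512-byte sectors) is relied on to
 * be a power of two here, so the complemented mask clears the offset
 * within the zone: e.g. with 32-sector packets and offset 0, sectors
 * 0..31 map to zone 0 and sector 40 maps to zone 32.
 */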
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}

/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}


/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/

DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};

207static ssize_t kobj_pkt_show(struct kobject *kobj,
208 struct attribute *attr, char *data)
209{
210 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
211 int n = 0;
212 int v;
213 if (strcmp(attr->name, "packets_started") == 0) {
214 n = sprintf(data, "%lu\n", pd->stats.pkt_started);
215
216 } else if (strcmp(attr->name, "packets_finished") == 0) {
217 n = sprintf(data, "%lu\n", pd->stats.pkt_ended);
218
219 } else if (strcmp(attr->name, "kb_written") == 0) {
220 n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);
221
222 } else if (strcmp(attr->name, "kb_read") == 0) {
223 n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);
224
225 } else if (strcmp(attr->name, "kb_read_gather") == 0) {
226 n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);
227
228 } else if (strcmp(attr->name, "size") == 0) {
229 spin_lock(&pd->lock);
230 v = pd->bio_queue_size;
231 spin_unlock(&pd->lock);
232 n = sprintf(data, "%d\n", v);
233
234 } else if (strcmp(attr->name, "congestion_off") == 0) {
235 spin_lock(&pd->lock);
236 v = pd->write_congestion_off;
237 spin_unlock(&pd->lock);
238 n = sprintf(data, "%d\n", v);
239
240 } else if (strcmp(attr->name, "congestion_on") == 0) {
241 spin_lock(&pd->lock);
242 v = pd->write_congestion_on;
243 spin_unlock(&pd->lock);
244 n = sprintf(data, "%d\n", v);
245 }
246 return n;
247}
248
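/*
 * Clamp the write-queue congestion marks to sane values: the "on" mark
 * is forced into [500, 1000000] queued bios and the "off" mark at
 * least 100 below it (but never below 100). A non-positive "on" value
 * disables congestion handling altogether (both marks become -1).
 */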
static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}

static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}

static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/

static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}

static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
			       char *data)
{
	int n = 0;
	int idx;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static CLASS_ATTR_RO(device_map);

static ssize_t add_store(struct class *c, struct class_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}
static CLASS_ATTR_WO(add);

static ssize_t remove_store(struct class *c, struct class_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int major, minor;
	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
static CLASS_ATTR_WO(remove);

static struct attribute *class_pktcdvd_attrs[] = {
	&class_attr_add.attr,
	&class_attr_remove.attr,
	&class_attr_device_map.attr,
	NULL,
};
ATTRIBUTE_GROUPS(class_pktcdvd);

static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_groups = class_pktcdvd_groups;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", 0444,
					     pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/


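/*
 * Called once per bio completed by the underlying device: drop the
 * pending-bios count and, when it reaches zero, raise the iosched
 * attention flag and wake the worker thread so it can decide whether
 * to switch between reading and writing.
 */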
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}

/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;

		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}

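/*
 * Pending write bios are kept in pd->bio_queue, an rb tree keyed by
 * starting sector. This lets the worker pick up work in roughly
 * ascending sector order and cheaply collect every queued bio that
 * falls into the same zone when assembling a packet.
 */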
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, &pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}

/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}

/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      GFP_NOIO);
		if (ret)
			goto out;
	}

	scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->rq_flags |= RQF_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (scsi_req(rq)->result)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}

static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct scsi_sense_hdr *sshdr = cgc->sshdr;

	if (sshdr)
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sshdr->sense_key, sshdr->asc, sshdr->ascq,
			sense_key_string(sshdr->sense_key));
	else
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}

/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);

	return ret;
}

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{

	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
			    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		submit_bio_noacct(bio);
	}
}

/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}

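/*
 * Completion handlers for the gather reads and the packet write. Both
 * account the finished bio via pkt_bio_finished() and poke the worker
 * thread; read errors are only counted here and acted upon later in
 * PACKET_READ_WAIT_STATE.
 */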
static void pkt_end_io_read(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);

	if (bio->bi_status)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}

/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio_set_dev(bio, pd->bdev);
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}

/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

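/*
 * Return a packet to the free list. Packets whose pages still cache
 * valid data for their zone go to the head, so a later write to the
 * same zone may skip the read-gather step; stale packets go to the
 * tail, making them the first reuse victims in pkt_get_packet_data().
 */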
static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}

static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}

/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
			get_zone(bio->bi_iter.bi_sector, pd));
		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}

/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	bio_set_dev(pkt->w_bio, pd->bdev);
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;

		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
		pkt->cache_valid = 1;
	else
		pkt->cache_valid = 0;

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
	pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
	struct bio *bio;

	if (status)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

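/*
 * Per-packet state machine, driven from the kcdrwd worker:
 *
 *   WAITING    -> READ_WAIT   when the packet is full or its timer expires
 *   READ_WAIT  -> WRITE_WAIT  when all gather reads finished without error
 *   READ_WAIT  -> RECOVERY    when a gather read failed
 *   WRITE_WAIT -> FINISHED    on write success, RECOVERY on write error
 *   RECOVERY   -> FINISHED    (no real recovery exists; the bios fail)
 */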
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (!pkt->w_bio->bi_status) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			pkt_dbg(2, pd, "No recovery possible\n");
			pkt_set_state(pkt, PACKET_FINISHED_STATE);
			break;

		case PACKET_FINISHED_STATE:
			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
			return;

		default:
			BUG();
			break;
		}
	}
}

static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}

static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
}

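/*
 * Thin wrappers that build the MODE SENSE(10)/MODE SELECT(10) CDBs:
 * byte 2 of MODE SENSE carries the page code and page-control field,
 * and bytes 7-8 of both commands hold the allocation/parameter-list
 * length taken from cgc->buflen.
 */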
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	ret = pkt_get_disc_info(pd, &di);
	if (ret)
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	ret = pkt_get_track_info(pd, last_track, 1, &ti);
	if (ret)
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		ret = pkt_get_track_info(pd, last_track, 1, &ti);
		if (ret)
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}

/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}

/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}

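/*
 * Probe the inserted medium: query the MMC profile and disc/track
 * info, check that both disc and track are packet writable, and derive
 * the packet size (kept in 512-byte sectors) plus the NWA/LRA hints
 * used for write placement.
 */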
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	ret = pkt_get_disc_info(pd, &di);
	if (ret) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	ret = pkt_get_track_info(pd, track, 1, &ti);
	if (ret) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}

1849/*
1850 * enable/disable write caching on drive
1851 */
05680d86
PO
1852static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
1853 int set)
1da177e4
LT
1854{
1855 struct packet_command cgc;
e7d0748d 1856 struct scsi_sense_hdr sshdr;
1da177e4
LT
1857 unsigned char buf[64];
1858 int ret;
1859
1da177e4 1860 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
e7d0748d 1861 cgc.sshdr = &sshdr;
1da177e4
LT
1862 cgc.buflen = pd->mode_offset + 12;
1863
1864 /*
1865 * caching mode page might not be there, so quiet this command
1866 */
1867 cgc.quiet = 1;
1868
ada94973
RH
1869 ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
1870 if (ret)
1da177e4
LT
1871 return ret;
1872
1873 buf[pd->mode_offset + 10] |= (!!set << 2);
1874
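	/*
	 * Bytes 0-1 of the (10-byte form) mode sense reply hold the mode data
	 * length, which excludes the two length bytes themselves; hence the
	 * "2 +" below when sizing the mode select transfer.
	 */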
1875 cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1876 ret = pkt_mode_select(pd, &cgc);
1877 if (ret) {
fa63c0ab 1878 pkt_err(pd, "write caching control failed\n");
f3ded788 1879 pkt_dump_sense(pd, &cgc);
1da177e4 1880 } else if (!ret && set)
ca73dabc 1881 pkt_notice(pd, "enabled write caching\n");
1da177e4
LT
1882 return ret;
1883}
1884
1885static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1886{
1887 struct packet_command cgc;
1888
1889 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1890 cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
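	/* Byte 4, bit 0 of the CDB: 1 = prevent removal (lock), 0 = allow. */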
1891 cgc.cmd[4] = lockflag ? 1 : 0;
1892 return pkt_generic_packet(pd, &cgc);
1893}
1894
1895/*
1896 * Returns the drive's maximum write speed
1897 */
05680d86
PO
1898static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
1899 unsigned *write_speed)
1da177e4
LT
1900{
1901 struct packet_command cgc;
e7d0748d 1902 struct scsi_sense_hdr sshdr;
1da177e4
LT
1903 unsigned char buf[256+18];
1904 unsigned char *cap_buf;
1905 int ret, offset;
1906
1da177e4
LT
1907 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1908 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
e7d0748d 1909 cgc.sshdr = &sshdr;
1da177e4
LT
1910
1911 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1912 if (ret) {
1913 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
1914 sizeof(struct mode_page_header);
1915 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1916 if (ret) {
f3ded788 1917 pkt_dump_sense(pd, &cgc);
1da177e4
LT
1918 return ret;
1919 }
1920 }
1921
1922 offset = 20; /* Obsolete field, used by older drives */
1923 if (cap_buf[1] >= 28)
1924 offset = 28; /* Current write speed selected */
1925 if (cap_buf[1] >= 30) {
1926 /* If the drive reports at least one "Logical Unit Write
1927 * Speed Performance Descriptor Block", use the information
1928 * in the first block. (contains the highest speed)
1929 */
1930 int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
1931 if (num_spdb > 0)
1932 offset = 34;
1933 }
1934
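	/*
	 * Speeds in the capabilities page are in kB/s; 1x CD is 176.4 kB/s,
	 * hence the "177" used as one CD speed elsewhere in this driver.
	 */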
1935 *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
1936 return 0;
1937}
1938
1939/* These tables are from cdrecord - I don't have the Orange Book */
1940/* standard speed CD-RW (1-4x) */
1941static char clv_to_speed[16] = {
1942 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
1943 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1944};
1945/* high speed CD-RW (up to 10x) */
1946static char hs_clv_to_speed[16] = {
1947 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
1948 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1949};
1950/* ultra high speed CD-RW */
1951static char us_clv_to_speed[16] = {
1952 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
1953 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
1954};
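/*
 * Each table maps the 4-bit ATIP speed code to an x-rating (0 = reserved
 * or unknown). pkt_media_speed() selects a table from the disc sub-type;
 * pkt_open_write() later converts the x-rating to kB/s as x * 177.
 */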
1955
1956/*
1957 * reads the maximum media speed from ATIP
1958 */
05680d86
PO
1959static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
1960 unsigned *speed)
1da177e4
LT
1961{
1962 struct packet_command cgc;
e7d0748d 1963 struct scsi_sense_hdr sshdr;
1da177e4
LT
1964 unsigned char buf[64];
1965 unsigned int size, st, sp;
1966 int ret;
1967
1968 init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
e7d0748d 1969 cgc.sshdr = &sshdr;
1da177e4
LT
1970 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1971 cgc.cmd[1] = 2;
1972 cgc.cmd[2] = 4; /* READ ATIP */
1973 cgc.cmd[8] = 2;
1974 ret = pkt_generic_packet(pd, &cgc);
1975 if (ret) {
f3ded788 1976 pkt_dump_sense(pd, &cgc);
1da177e4
LT
1977 return ret;
1978 }
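	/*
	 * Bytes 0-1 of the ATIP response hold the remaining data length, so
	 * the full transfer is that length + 2; clamp it to our buffer.
	 */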
1979 size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
1980 if (size > sizeof(buf))
1981 size = sizeof(buf);
1982
1983 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
e7d0748d 1984 cgc.sshdr = &sshdr;
1da177e4
LT
1985 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1986 cgc.cmd[1] = 2;
1987 cgc.cmd[2] = 4;
1988 cgc.cmd[8] = size;
1989 ret = pkt_generic_packet(pd, &cgc);
1990 if (ret) {
f3ded788 1991 pkt_dump_sense(pd, &cgc);
1da177e4
LT
1992 return ret;
1993 }
1994
eaa0ff15 1995 if (!(buf[6] & 0x40)) {
ca73dabc 1996 pkt_notice(pd, "disc type is not CD-RW\n");
1da177e4
LT
1997 return 1;
1998 }
eaa0ff15 1999 if (!(buf[6] & 0x4)) {
ca73dabc 2000 pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
1da177e4
LT
2001 return 1;
2002 }
2003
2004 st = (buf[6] >> 3) & 0x7; /* disc sub-type */
2005
2006 sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
2007
2008 /* Info from cdrecord */
2009 switch (st) {
2010 case 0: /* standard speed */
2011 *speed = clv_to_speed[sp];
2012 break;
2013 case 1: /* high speed */
2014 *speed = hs_clv_to_speed[sp];
2015 break;
2016 case 2: /* ultra high speed */
2017 *speed = us_clv_to_speed[sp];
2018 break;
2019 default:
ca73dabc 2020 pkt_notice(pd, "unknown disc sub-type %d\n", st);
1da177e4
LT
2021 return 1;
2022 }
2023 if (*speed) {
0c075d64 2024 pkt_info(pd, "maximum media speed: %d\n", *speed);
1da177e4
LT
2025 return 0;
2026 } else {
ca73dabc 2027 pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
1da177e4
LT
2028 return 1;
2029 }
2030}
2031
05680d86 2032static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
1da177e4
LT
2033{
2034 struct packet_command cgc;
e7d0748d 2035 struct scsi_sense_hdr sshdr;
1da177e4
LT
2036 int ret;
2037
844aa797 2038 pkt_dbg(2, pd, "Performing OPC\n");
1da177e4
LT
2039
2040 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
e7d0748d 2041 cgc.sshdr = &sshdr;
1da177e4
LT
2042 cgc.timeout = 60*HZ;
2043 cgc.cmd[0] = GPCMD_SEND_OPC;
2044 cgc.cmd[1] = 1;
ada94973
RH
2045 ret = pkt_generic_packet(pd, &cgc);
2046 if (ret)
f3ded788 2047 pkt_dump_sense(pd, &cgc);
1da177e4
LT
2048 return ret;
2049}
2050
2051static int pkt_open_write(struct pktcdvd_device *pd)
2052{
2053 int ret;
2054 unsigned int write_speed, media_write_speed, read_speed;
2055
ada94973
RH
2056 ret = pkt_probe_settings(pd);
2057 if (ret) {
844aa797 2058 pkt_dbg(2, pd, "failed probe\n");
9db91546 2059 return ret;
1da177e4
LT
2060 }
2061
ada94973
RH
2062 ret = pkt_set_write_settings(pd);
2063 if (ret) {
844aa797 2064 pkt_dbg(1, pd, "failed saving write settings\n");
1da177e4
LT
2065 return -EIO;
2066 }
2067
2068 pkt_write_caching(pd, USE_WCACHING);
2069
ada94973
RH
2070 ret = pkt_get_max_speed(pd, &write_speed);
2071 if (ret)
1da177e4
LT
2072 write_speed = 16 * 177;
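	/* The fallback above is 16x CD speed: 16 * 177 = 2832 kB/s. */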
2073 switch (pd->mmc3_profile) {
2074 case 0x13: /* DVD-RW */
2075 case 0x1a: /* DVD+RW */
2076 case 0x12: /* DVD-RAM */
844aa797 2077 pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
1da177e4
LT
2078 break;
2079 default:
ada94973
RH
2080 ret = pkt_media_speed(pd, &media_write_speed);
2081 if (ret)
1da177e4
LT
2082 media_write_speed = 16;
2083 write_speed = min(write_speed, media_write_speed * 177);
844aa797 2084 pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
1da177e4
LT
2085 break;
2086 }
2087 read_speed = write_speed;
2088
ada94973
RH
2089 ret = pkt_set_speed(pd, write_speed, read_speed);
2090 if (ret) {
844aa797 2091 pkt_dbg(1, pd, "couldn't set write speed\n");
1da177e4
LT
2092 return -EIO;
2093 }
2094 pd->write_speed = write_speed;
2095 pd->read_speed = read_speed;
2096
ada94973
RH
2097 ret = pkt_perform_opc(pd);
2098 if (ret) {
844aa797 2099 pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
1da177e4
LT
2100 }
2101
2102 return 0;
2103}
2104
2105/*
2106 * called at open time.
2107 */
aeb5d727 2108static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
1da177e4
LT
2109{
2110 int ret;
2111 long lba;
165125e1 2112 struct request_queue *q;
b8d95484 2113 struct block_device *bdev;
1da177e4
LT
2114
2115 /*
2116 * We need to re-open the cdrom device without O_NONBLOCK to be able
2117 * to read/write from/to it. It is already opened in O_NONBLOCK mode
b8d95484 2118 * so open should not fail.
1da177e4 2119 */
b8d95484
CH
2120 bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
2121 if (IS_ERR(bdev)) {
2122 ret = PTR_ERR(bdev);
1da177e4 2123 goto out;
b8d95484 2124 }
1da177e4 2125
ada94973
RH
2126 ret = pkt_get_last_written(pd, &lba);
2127 if (ret) {
fa63c0ab 2128 pkt_err(pd, "pkt_get_last_written failed\n");
e525fd89 2129 goto out_putdev;
1da177e4
LT
2130 }
2131
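	/* lba counts 2kB frames; << 2 converts to 512-byte block-layer sectors. */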
2132 set_capacity(pd->disk, lba << 2);
2133 set_capacity(pd->bdev->bd_disk, lba << 2);
611bee52 2134 bd_set_nr_sectors(pd->bdev, lba << 2);
1da177e4
LT
2135
2136 q = bdev_get_queue(pd->bdev);
2137 if (write) {
ada94973
RH
2138 ret = pkt_open_write(pd);
2139 if (ret)
e525fd89 2140 goto out_putdev;
1da177e4
LT
2141 /*
2142 * Some CDRW drives cannot handle writes larger than one packet,
2143 * even if the size is a multiple of the packet size.
2144 */
086fa5ff 2145 blk_queue_max_hw_sectors(q, pd->settings.size);
1da177e4
LT
2146 set_bit(PACKET_WRITABLE, &pd->flags);
2147 } else {
2148 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2149 clear_bit(PACKET_WRITABLE, &pd->flags);
2150 }
2151
ada94973
RH
2152 ret = pkt_set_segment_merging(pd, q);
2153 if (ret)
e525fd89 2154 goto out_putdev;
1da177e4 2155
e1bc89bc
PO
2156 if (write) {
2157 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
fa63c0ab 2158 pkt_err(pd, "not enough memory for buffers\n");
e1bc89bc 2159 ret = -ENOMEM;
e525fd89 2160 goto out_putdev;
e1bc89bc 2161 }
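		/* lba is in 2kB frames, so lba << 1 is the capacity in kB. */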
0c075d64 2162 pkt_info(pd, "%lukB available on disc\n", lba << 1);
e1bc89bc 2163 }
1da177e4
LT
2164
2165 return 0;
2166
2167out_putdev:
b8d95484 2168 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1da177e4
LT
2169out:
2170 return ret;
2171}
2172
2173/*
2174 * called when the device is closed. makes sure that the device flushes
2175 * the internal cache before we close.
2176 */
2177static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2178{
2179 if (flush && pkt_flush_cache(pd))
844aa797 2180 pkt_dbg(1, pd, "not flushing cache\n");
1da177e4
LT
2181
2182 pkt_lock_door(pd, 0);
2183
2184 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
e525fd89 2185 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
e1bc89bc
PO
2186
2187 pkt_shrink_pktlist(pd);
1da177e4
LT
2188}
2189
252a52aa 2190static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
1da177e4
LT
2191{
2192 if (dev_minor >= MAX_WRITERS)
2193 return NULL;
55690c07
JP
2194
2195 dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
1da177e4
LT
2196 return pkt_devs[dev_minor];
2197}
2198
5e5e007c 2199static int pkt_open(struct block_device *bdev, fmode_t mode)
1da177e4
LT
2200{
2201 struct pktcdvd_device *pd = NULL;
2202 int ret;
2203
2a48fc0a 2204 mutex_lock(&pktcdvd_mutex);
1657f824 2205 mutex_lock(&ctl_mutex);
5e5e007c 2206 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
1da177e4
LT
2207 if (!pd) {
2208 ret = -ENODEV;
2209 goto out;
2210 }
2211 BUG_ON(pd->refcnt < 0);
2212
2213 pd->refcnt++;
46f4e1b7 2214 if (pd->refcnt > 1) {
5e5e007c 2215 if ((mode & FMODE_WRITE) &&
46f4e1b7
PO
2216 !test_bit(PACKET_WRITABLE, &pd->flags)) {
2217 ret = -EBUSY;
2218 goto out_dec;
2219 }
2220 } else {
5e5e007c 2221 ret = pkt_open_dev(pd, mode & FMODE_WRITE);
01fd9fda 2222 if (ret)
1da177e4 2223 goto out_dec;
1da177e4
LT
2224 /*
2225 * needed here as well, since ext2 (among others) may change
2226 * the blocksize at mount time
2227 */
5e5e007c 2228 set_blocksize(bdev, CD_FRAMESIZE);
1da177e4
LT
2229 }
2230
1657f824 2231 mutex_unlock(&ctl_mutex);
2a48fc0a 2232 mutex_unlock(&pktcdvd_mutex);
1da177e4
LT
2233 return 0;
2234
2235out_dec:
2236 pd->refcnt--;
2237out:
1657f824 2238 mutex_unlock(&ctl_mutex);
2a48fc0a 2239 mutex_unlock(&pktcdvd_mutex);
1da177e4
LT
2240 return ret;
2241}
2242
db2a144b 2243static void pkt_close(struct gendisk *disk, fmode_t mode)
1da177e4 2244{
5e5e007c 2245 struct pktcdvd_device *pd = disk->private_data;
1da177e4 2246
2a48fc0a 2247 mutex_lock(&pktcdvd_mutex);
1657f824 2248 mutex_lock(&ctl_mutex);
1da177e4
LT
2249 pd->refcnt--;
2250 BUG_ON(pd->refcnt < 0);
2251 if (pd->refcnt == 0) {
2252 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2253 pkt_release_dev(pd, flush);
2254 }
1657f824 2255 mutex_unlock(&ctl_mutex);
2a48fc0a 2256 mutex_unlock(&pktcdvd_mutex);
1da177e4
LT
2257}
2258
2259
4246a0b6 2260static void pkt_end_io_read_cloned(struct bio *bio)
1da177e4
LT
2261{
2262 struct packet_stacked_data *psd = bio->bi_private;
2263 struct pktcdvd_device *pd = psd->pd;
2264
4e4cbee9 2265 psd->bio->bi_status = bio->bi_status;
1da177e4 2266 bio_put(bio);
4246a0b6 2267 bio_endio(psd->bio);
64c4bc4d 2268 mempool_free(psd, &psd_pool);
1da177e4 2269 pkt_bio_finished(pd);
1da177e4
LT
2270}
2271
20d0189b 2272static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
1da177e4 2273{
64c4bc4d
KO
2274 struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set);
2275 struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
20d0189b
KO
2276
2277 psd->pd = pd;
2278 psd->bio = bio;
74d46992 2279 bio_set_dev(cloned_bio, pd->bdev);
20d0189b
KO
2280 cloned_bio->bi_private = psd;
2281 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2282 pd->stats.secs_r += bio_sectors(bio);
2283 pkt_queue_bio(pd, cloned_bio);
2284}
2285
2286static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
2287{
2288 struct pktcdvd_device *pd = q->queuedata;
1da177e4
LT
2289 sector_t zone;
2290 struct packet_data *pkt;
2291 int was_empty, blocked_bio;
2292 struct pkt_rb_node *node;
2293
4f024f37 2294 zone = get_zone(bio->bi_iter.bi_sector, pd);
1da177e4
LT
2295
2296 /*
2297 * If we find a matching packet in state WAITING or READ_WAIT, we can
2298 * just append this bio to that packet.
2299 */
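	/*
	 * (A packet past READ_WAIT is already being written, so a bio for
	 * the same zone must then wait in the work queue instead.)
	 */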
2300 spin_lock(&pd->cdrw.active_list_lock);
2301 blocked_bio = 0;
2302 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2303 if (pkt->sector == zone) {
2304 spin_lock(&pkt->lock);
2305 if ((pkt->state == PACKET_WAITING_STATE) ||
2306 (pkt->state == PACKET_READ_WAIT_STATE)) {
c5ecc484 2307 bio_list_add(&pkt->orig_bios, bio);
4f024f37
KO
2308 pkt->write_size +=
2309 bio->bi_iter.bi_size / CD_FRAMESIZE;
1da177e4
LT
2310 if ((pkt->write_size >= pkt->frames) &&
2311 (pkt->state == PACKET_WAITING_STATE)) {
2312 atomic_inc(&pkt->run_sm);
2313 wake_up(&pd->wqueue);
2314 }
2315 spin_unlock(&pkt->lock);
2316 spin_unlock(&pd->cdrw.active_list_lock);
5a7bbad2 2317 return;
1da177e4
LT
2318 } else {
2319 blocked_bio = 1;
2320 }
2321 spin_unlock(&pkt->lock);
2322 }
2323 }
2324 spin_unlock(&pd->cdrw.active_list_lock);
2325
0a0fc960
TM
2326 /*
2327 * Test whether the bio work queue has reached the congestion-on mark
2328 * (queue size >= write_congestion_on). If so, wait until the queue
2329 * has drained below the congestion-off mark.
2330 */
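	/*
	 * The gap between the on and off marks provides hysteresis, so
	 * writers are not throttled and released on every small change in
	 * queue depth.
	 */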
2331 spin_lock(&pd->lock);
2332 if (pd->write_congestion_on > 0
2333 && pd->bio_queue_size >= pd->write_congestion_on) {
dc3b17cc 2334 set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
0a0fc960
TM
2335 do {
2336 spin_unlock(&pd->lock);
8aa7e847 2337 congestion_wait(BLK_RW_ASYNC, HZ);
0a0fc960
TM
2338 spin_lock(&pd->lock);
2339 } while(pd->bio_queue_size > pd->write_congestion_off);
2340 }
2341 spin_unlock(&pd->lock);
2342
1da177e4
LT
2343 /*
2344 * No matching packet found. Store the bio in the work queue.
2345 */
64c4bc4d 2346 node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
1da177e4
LT
2347 node->bio = bio;
2348 spin_lock(&pd->lock);
2349 BUG_ON(pd->bio_queue_size < 0);
2350 was_empty = (pd->bio_queue_size == 0);
2351 pkt_rbtree_insert(pd, node);
2352 spin_unlock(&pd->lock);
2353
2354 /*
2355 * Wake up the worker thread.
2356 */
2357 atomic_set(&pd->scan_queue, 1);
2358 if (was_empty) {
2359 /* This wake_up is required for correct operation */
2360 wake_up(&pd->wqueue);
2361 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2362 /*
2363 * This wake up is not required for correct operation,
2364 * but improves performance in some cases.
2365 */
2366 wake_up(&pd->wqueue);
2367 }
20d0189b
KO
2368}
2369
c62b37d9 2370static blk_qc_t pkt_submit_bio(struct bio *bio)
20d0189b
KO
2371{
2372 struct pktcdvd_device *pd;
2373 char b[BDEVNAME_SIZE];
2374 struct bio *split;
2375
f695ca38 2376 blk_queue_split(&bio);
54efd50b 2377
c62b37d9 2378 pd = bio->bi_disk->queue->queuedata;
20d0189b 2379 if (!pd) {
74d46992 2380 pr_err("%s incorrect request queue\n", bio_devname(bio, b));
20d0189b
KO
2381 goto end_io;
2382 }
2383
2384 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2385 (unsigned long long)bio->bi_iter.bi_sector,
2386 (unsigned long long)bio_end_sector(bio));
2387
2388 /*
2389 * Clone READ bios so we can have our own bi_end_io callback.
2390 */
2391 if (bio_data_dir(bio) == READ) {
2392 pkt_make_request_read(pd, bio);
dece1635 2393 return BLK_QC_T_NONE;
20d0189b
KO
2394 }
2395
2396 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2397 pkt_notice(pd, "WRITE for ro device (%llu)\n",
2398 (unsigned long long)bio->bi_iter.bi_sector);
2399 goto end_io;
2400 }
2401
2402 if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
2403 pkt_err(pd, "wrong bio size\n");
2404 goto end_io;
2405 }
2406
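	/*
	 * A write bio may straddle a packet (zone) boundary. Because writes
	 * are capped at the packet size, it spans at most two zones; split
	 * at the boundary and chain the piece so completion still propagates
	 * to the parent bio.
	 */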
20d0189b
KO
2407 do {
2408 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2409 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2410
2411 if (last_zone != zone) {
2412 BUG_ON(last_zone != zone + pd->settings.size);
2413
2414 split = bio_split(bio, last_zone -
2415 bio->bi_iter.bi_sector,
64c4bc4d 2416 GFP_NOIO, &pkt_bio_set);
20d0189b
KO
2417 bio_chain(split, bio);
2418 } else {
2419 split = bio;
2420 }
2421
c62b37d9 2422 pkt_make_request_write(bio->bi_disk->queue, split);
20d0189b
KO
2423 } while (split != bio);
2424
dece1635 2425 return BLK_QC_T_NONE;
1da177e4 2426end_io:
6712ecf8 2427 bio_io_error(bio);
dece1635 2428 return BLK_QC_T_NONE;
1da177e4
LT
2429}
2430
1da177e4
LT
2431static void pkt_init_queue(struct pktcdvd_device *pd)
2432{
165125e1 2433 struct request_queue *q = pd->disk->queue;
1da177e4 2434
e1defc4f 2435 blk_queue_logical_block_size(q, CD_FRAMESIZE);
086fa5ff 2436 blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
1da177e4
LT
2437 q->queuedata = pd;
2438}
2439
2440static int pkt_seq_show(struct seq_file *m, void *p)
2441{
2442 struct pktcdvd_device *pd = m->private;
2443 char *msg;
2444 char bdev_buf[BDEVNAME_SIZE];
2445 int states[PACKET_NUM_STATES];
2446
2447 seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
2448 bdevname(pd->bdev, bdev_buf));
2449
2450 seq_printf(m, "\nSettings:\n");
2451 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2452
2453 if (pd->settings.write_type == 0)
2454 msg = "Packet";
2455 else
2456 msg = "Unknown";
2457 seq_printf(m, "\twrite type:\t\t%s\n", msg);
2458
2459 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2460 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2461
2462 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2463
2464 if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2465 msg = "Mode 1";
2466 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2467 msg = "Mode 2";
2468 else
2469 msg = "Unknown";
2470 seq_printf(m, "\tblock mode:\t\t%s\n", msg);
2471
2472 seq_printf(m, "\nStatistics:\n");
2473 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
2474 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
2475 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
2476 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
2477 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
2478
2479 seq_printf(m, "\nMisc:\n");
2480 seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
2481 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
2482 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
2483 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
2484 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
2485 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
2486
2487 seq_printf(m, "\nQueue state:\n");
2488 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
2489 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
2490 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
2491
2492 pkt_count_states(pd, states);
2493 seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
2494 states[0], states[1], states[2], states[3], states[4], states[5]);
2495
0a0fc960
TM
2496 seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
2497 pd->write_congestion_off,
2498 pd->write_congestion_on);
1da177e4
LT
2499 return 0;
2500}
2501
1da177e4
LT
2502static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2503{
2504 int i;
1da177e4 2505 char b[BDEVNAME_SIZE];
1da177e4
LT
2506 struct block_device *bdev;
2507
2508 if (pd->pkt_dev == dev) {
fa63c0ab 2509 pkt_err(pd, "recursive setup not allowed\n");
1da177e4
LT
2510 return -EBUSY;
2511 }
2512 for (i = 0; i < MAX_WRITERS; i++) {
2513 struct pktcdvd_device *pd2 = pkt_devs[i];
2514 if (!pd2)
2515 continue;
2516 if (pd2->bdev->bd_dev == dev) {
fa63c0ab
JP
2517 pkt_err(pd, "%s already setup\n",
2518 bdevname(pd2->bdev, b));
1da177e4
LT
2519 return -EBUSY;
2520 }
2521 if (pd2->pkt_dev == dev) {
fa63c0ab 2522 pkt_err(pd, "can't chain pktcdvd devices\n");
1da177e4
LT
2523 return -EBUSY;
2524 }
2525 }
2526
b8d95484
CH
2527 bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
2528 if (IS_ERR(bdev))
2529 return PTR_ERR(bdev);
ec2be6a9 2530 if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
882d4171 2531 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
ec2be6a9
BVA
2532 return -EINVAL;
2533 }
1da177e4
LT
2534
2535 /* This is safe, since we have a reference from open(). */
2536 __module_get(THIS_MODULE);
2537
1da177e4
LT
2538 pd->bdev = bdev;
2539 set_blocksize(bdev, CD_FRAMESIZE);
2540
2541 pkt_init_queue(pd);
2542
2543 atomic_set(&pd->cdrw.pending_bios, 0);
2544 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2545 if (IS_ERR(pd->cdrw.thread)) {
fa63c0ab 2546 pkt_err(pd, "can't start kernel thread\n");
e1bc89bc 2547 goto out_mem;
1da177e4
LT
2548 }
2549
3f3942ac 2550 proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
844aa797 2551 pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
1da177e4
LT
2552 return 0;
2553
1da177e4 2554out_mem:
2cbed890 2555 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
1da177e4
LT
2556 /* This is safe: open() is still holding a reference. */
2557 module_put(THIS_MODULE);
b8d95484 2558 return -ENOMEM;
1da177e4
LT
2559}
2560
5e5e007c 2561static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
1da177e4 2562{
5e5e007c 2563 struct pktcdvd_device *pd = bdev->bd_disk->private_data;
8a6cfeb6 2564 int ret;
1da177e4 2565
844aa797 2566 pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
cd3f2cd0 2567 cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
1da177e4 2568
2a48fc0a 2569 mutex_lock(&pktcdvd_mutex);
1da177e4 2570 switch (cmd) {
a0eb62a0
AV
2571 case CDROMEJECT:
2572 /*
2573 * The door gets locked when the device is opened, so we
2574 * have to unlock it or else the eject command fails.
2575 */
2576 if (pd->refcnt == 1)
2577 pkt_lock_door(pd, 0);
df561f66 2578 fallthrough;
1da177e4
LT
2579 /*
2580 * forward selected CDROM ioctls to CD-ROM, for UDF
2581 */
2582 case CDROMMULTISESSION:
2583 case CDROMREADTOCENTRY:
2584 case CDROM_LAST_WRITTEN:
2585 case CDROM_SEND_PACKET:
2586 case SCSI_IOCTL_SEND_COMMAND:
8a6cfeb6
AB
2587 ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
2588 break;
1da177e4
LT
2589
2590 default:
844aa797 2591 pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
8a6cfeb6 2592 ret = -ENOTTY;
1da177e4 2593 }
2a48fc0a 2594 mutex_unlock(&pktcdvd_mutex);
8560c650 2595
8a6cfeb6 2596 return ret;
1da177e4
LT
2597}
2598
3c0d2060
TH
2599static unsigned int pkt_check_events(struct gendisk *disk,
2600 unsigned int clearing)
1da177e4
LT
2601{
2602 struct pktcdvd_device *pd = disk->private_data;
2603 struct gendisk *attached_disk;
2604
2605 if (!pd)
2606 return 0;
2607 if (!pd->bdev)
2608 return 0;
2609 attached_disk = pd->bdev->bd_disk;
3c0d2060 2610 if (!attached_disk || !attached_disk->fops->check_events)
1da177e4 2611 return 0;
3c0d2060 2612 return attached_disk->fops->check_events(attached_disk, clearing);
1da177e4
LT
2613}
2614
348e114b
CH
2615static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
2616{
2617 return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
2618}
2619
83d5cde4 2620static const struct block_device_operations pktcdvd_ops = {
1da177e4 2621 .owner = THIS_MODULE,
c62b37d9 2622 .submit_bio = pkt_submit_bio,
5e5e007c
AV
2623 .open = pkt_open,
2624 .release = pkt_close,
8a6cfeb6 2625 .ioctl = pkt_ioctl,
ab8bc541 2626 .compat_ioctl = blkdev_compat_ptr_ioctl,
3c0d2060 2627 .check_events = pkt_check_events,
348e114b 2628 .devnode = pkt_devnode,
1da177e4
LT
2629};
2630
2631/*
2632 * Set up mapping from pktcdvd device to CD-ROM device.
2633 */
adb9250a 2634static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
1da177e4
LT
2635{
2636 int idx;
2637 int ret = -ENOMEM;
2638 struct pktcdvd_device *pd;
2639 struct gendisk *disk;
adb9250a
TM
2640
2641 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1da177e4
LT
2642
2643 for (idx = 0; idx < MAX_WRITERS; idx++)
2644 if (!pkt_devs[idx])
2645 break;
2646 if (idx == MAX_WRITERS) {
99481334 2647 pr_err("max %d writers supported\n", MAX_WRITERS);
adb9250a
TM
2648 ret = -EBUSY;
2649 goto out_mutex;
1da177e4
LT
2650 }
2651
1107d2e0 2652 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
1da177e4 2653 if (!pd)
adb9250a 2654 goto out_mutex;
1da177e4 2655
64c4bc4d
KO
2656 ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
2657 sizeof(struct pkt_rb_node));
2658 if (ret)
1da177e4
LT
2659 goto out_mem;
2660
e1bc89bc
PO
2661 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2662 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2663 spin_lock_init(&pd->cdrw.active_list_lock);
2664
1da177e4
LT
2665 spin_lock_init(&pd->lock);
2666 spin_lock_init(&pd->iosched.lock);
c5ecc484
AM
2667 bio_list_init(&pd->iosched.read_queue);
2668 bio_list_init(&pd->iosched.write_queue);
7822082d 2669 sprintf(pd->name, DRIVER_NAME"%d", idx);
1da177e4
LT
2670 init_waitqueue_head(&pd->wqueue);
2671 pd->bio_queue = RB_ROOT;
2672
0a0fc960
TM
2673 pd->write_congestion_on = write_congestion_on;
2674 pd->write_congestion_off = write_congestion_off;
2675
566484a9 2676 ret = -ENOMEM;
adb9250a
TM
2677 disk = alloc_disk(1);
2678 if (!disk)
2679 goto out_mem;
2680 pd->disk = disk;
add21660 2681 disk->major = pktdev_major;
1da177e4
LT
2682 disk->first_minor = idx;
2683 disk->fops = &pktcdvd_ops;
2684 disk->flags = GENHD_FL_REMOVABLE;
adb9250a 2685 strcpy(disk->disk_name, pd->name);
1da177e4 2686 disk->private_data = pd;
c62b37d9 2687 disk->queue = blk_alloc_queue(NUMA_NO_NODE);
1da177e4
LT
2688 if (!disk->queue)
2689 goto out_mem2;
2690
f331c029 2691 pd->pkt_dev = MKDEV(pktdev_major, idx);
1da177e4
LT
2692 ret = pkt_new_dev(pd, dev);
2693 if (ret)
5a0ec388 2694 goto out_mem2;
1da177e4 2695
3c0d2060
TH
2696 /* inherit events of the host device */
2697 disk->events = pd->bdev->bd_disk->events;
3c0d2060 2698
1da177e4 2699 add_disk(disk);
adb9250a 2700
32694850
TM
2701 pkt_sysfs_dev_new(pd);
2702 pkt_debugfs_dev_new(pd);
2703
1da177e4 2704 pkt_devs[idx] = pd;
adb9250a
TM
2705 if (pkt_dev)
2706 *pkt_dev = pd->pkt_dev;
2707
2708 mutex_unlock(&ctl_mutex);
1da177e4
LT
2709 return 0;
2710
1da177e4
LT
2711out_mem2:
2712 put_disk(disk);
2713out_mem:
64c4bc4d 2714 mempool_exit(&pd->rb_pool);
1da177e4 2715 kfree(pd);
adb9250a
TM
2716out_mutex:
2717 mutex_unlock(&ctl_mutex);
99481334 2718 pr_err("setup of pktcdvd device failed\n");
1da177e4
LT
2719 return ret;
2720}
2721
2722/*
2723 * Tear down mapping from pktcdvd device to CD-ROM device.
2724 */
adb9250a 2725static int pkt_remove_dev(dev_t pkt_dev)
1da177e4
LT
2726{
2727 struct pktcdvd_device *pd;
2728 int idx;
adb9250a
TM
2729 int ret = 0;
2730
2731 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1da177e4
LT
2732
2733 for (idx = 0; idx < MAX_WRITERS; idx++) {
2734 pd = pkt_devs[idx];
2735 if (pd && (pd->pkt_dev == pkt_dev))
2736 break;
2737 }
2738 if (idx == MAX_WRITERS) {
666dc7c9 2739 pr_debug("dev not setup\n");
adb9250a
TM
2740 ret = -ENXIO;
2741 goto out;
1da177e4
LT
2742 }
2743
adb9250a
TM
2744 if (pd->refcnt > 0) {
2745 ret = -EBUSY;
2746 goto out;
2747 }
1da177e4
LT
2748 if (!IS_ERR(pd->cdrw.thread))
2749 kthread_stop(pd->cdrw.thread);
2750
32694850
TM
2751 pkt_devs[idx] = NULL;
2752
2753 pkt_debugfs_dev_remove(pd);
2754 pkt_sysfs_dev_remove(pd);
2755
2cbed890 2756 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
1da177e4 2757
1da177e4 2758 remove_proc_entry(pd->name, pkt_proc);
844aa797 2759 pkt_dbg(1, pd, "writer unmapped\n");
1da177e4
LT
2760
2761 del_gendisk(pd->disk);
1312f40e 2762 blk_cleanup_queue(pd->disk->queue);
1da177e4
LT
2763 put_disk(pd->disk);
2764
64c4bc4d 2765 mempool_exit(&pd->rb_pool);
1da177e4
LT
2766 kfree(pd);
2767
2768 /* This is safe: open() is still holding a reference. */
2769 module_put(THIS_MODULE);
adb9250a
TM
2770
2771out:
2772 mutex_unlock(&ctl_mutex);
2773 return ret;
1da177e4
LT
2774}
2775
2776static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2777{
adb9250a
TM
2778 struct pktcdvd_device *pd;
2779
2780 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2781
2782 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
1da177e4
LT
2783 if (pd) {
2784 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2785 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2786 } else {
2787 ctrl_cmd->dev = 0;
2788 ctrl_cmd->pkt_dev = 0;
2789 }
2790 ctrl_cmd->num_devices = MAX_WRITERS;
adb9250a
TM
2791
2792 mutex_unlock(&ctl_mutex);
1da177e4
LT
2793}
2794
f80a0ca6 2795static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1da177e4
LT
2796{
2797 void __user *argp = (void __user *)arg;
2798 struct pkt_ctrl_command ctrl_cmd;
2799 int ret = 0;
adb9250a 2800 dev_t pkt_dev = 0;
1da177e4
LT
2801
2802 if (cmd != PACKET_CTRL_CMD)
2803 return -ENOTTY;
2804
2805 if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
2806 return -EFAULT;
2807
2808 switch (ctrl_cmd.command) {
2809 case PKT_CTRL_CMD_SETUP:
2810 if (!capable(CAP_SYS_ADMIN))
2811 return -EPERM;
adb9250a
TM
2812 ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
2813 ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
1da177e4
LT
2814 break;
2815 case PKT_CTRL_CMD_TEARDOWN:
2816 if (!capable(CAP_SYS_ADMIN))
2817 return -EPERM;
adb9250a 2818 ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
1da177e4
LT
2819 break;
2820 case PKT_CTRL_CMD_STATUS:
1da177e4 2821 pkt_get_status(&ctrl_cmd);
1da177e4
LT
2822 break;
2823 default:
2824 return -ENOTTY;
2825 }
2826
2827 if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
2828 return -EFAULT;
2829 return ret;
2830}
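/*
 * Userspace (e.g. pktsetup) drives this interface by opening
 * /dev/pktcdvd/control, filling in a struct pkt_ctrl_command with
 * command = PKT_CTRL_CMD_SETUP and dev = the CD device number, and
 * issuing ioctl(fd, PACKET_CTRL_CMD, &cmd); the new device number
 * comes back in pkt_dev.
 */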
2831
f80a0ca6
AB
2832#ifdef CONFIG_COMPAT
2833static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2834{
2835 return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2836}
2837#endif
1da177e4 2838
2b8693c0 2839static const struct file_operations pkt_ctl_fops = {
f80a0ca6
AB
2840 .open = nonseekable_open,
2841 .unlocked_ioctl = pkt_ctl_ioctl,
2842#ifdef CONFIG_COMPAT
2843 .compat_ioctl = pkt_ctl_compat_ioctl,
2844#endif
2845 .owner = THIS_MODULE,
6038f373 2846 .llseek = no_llseek,
1da177e4
LT
2847};
2848
2849static struct miscdevice pkt_misc = {
2850 .minor = MISC_DYNAMIC_MINOR,
7822082d 2851 .name = DRIVER_NAME,
e454cea2 2852 .nodename = "pktcdvd/control",
1da177e4
LT
2853 .fops = &pkt_ctl_fops
2854};
2855
2856static int __init pkt_init(void)
2857{
2858 int ret;
2859
32694850
TM
2860 mutex_init(&ctl_mutex);
2861
64c4bc4d
KO
2862 ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
2863 sizeof(struct packet_stacked_data));
2864 if (ret)
2865 return ret;
2866 ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
2867 if (ret) {
2868 mempool_exit(&psd_pool);
2869 return ret;
a1d91404 2870 }
1da177e4 2871
add21660 2872 ret = register_blkdev(pktdev_major, DRIVER_NAME);
1da177e4 2873 if (ret < 0) {
99481334 2874 pr_err("unable to register block device\n");
1da177e4
LT
2875 goto out2;
2876 }
add21660
TM
2877 if (!pktdev_major)
2878 pktdev_major = ret;
1da177e4 2879
32694850
TM
2880 ret = pkt_sysfs_init();
2881 if (ret)
2882 goto out;
2883
2884 pkt_debugfs_init();
2885
1da177e4
LT
2886 ret = misc_register(&pkt_misc);
2887 if (ret) {
99481334 2888 pr_err("unable to register misc device\n");
32694850 2889 goto out_misc;
1da177e4
LT
2890 }
2891
928b4d8c 2892 pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
1da177e4 2893
1da177e4
LT
2894 return 0;
2895
32694850
TM
2896out_misc:
2897 pkt_debugfs_cleanup();
2898 pkt_sysfs_cleanup();
1da177e4 2899out:
add21660 2900 unregister_blkdev(pktdev_major, DRIVER_NAME);
1da177e4 2901out2:
64c4bc4d
KO
2902 mempool_exit(&psd_pool);
2903 bioset_exit(&pkt_bio_set);
1da177e4
LT
2904 return ret;
2905}
2906
2907static void __exit pkt_exit(void)
2908{
928b4d8c 2909 remove_proc_entry("driver/"DRIVER_NAME, NULL);
1da177e4 2910 misc_deregister(&pkt_misc);
32694850
TM
2911
2912 pkt_debugfs_cleanup();
2913 pkt_sysfs_cleanup();
2914
add21660 2915 unregister_blkdev(pktdev_major, DRIVER_NAME);
64c4bc4d
KO
2916 mempool_exit(&psd_pool);
2917 bioset_exit(&pkt_bio_set);
1da177e4
LT
2918}
2919
2920MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
2921MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
2922MODULE_LICENSE("GPL");
2923
2924module_init(pkt_init);
2925module_exit(pkt_exit);