/*
 * IDE DMA support (including IDE PCI BM-DMA).
 *
 * Copyright (C) 1995-1998 Mark Lord
 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz
 *
 * May be copied or modified under the terms of the GNU General Public License
 *
 * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 * Special Thanks to Mark for his Six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static const struct drive_list_entry drive_whitelist[] = {
        { "Micropolis 2112A", NULL },
        { "CONNER CTMA 4000", NULL },
        { "CONNER CTT8000-A", NULL },
        { "ST34342A", NULL },
        { NULL, NULL }
};

static const struct drive_list_entry drive_blacklist[] = {
        { "WDC AC11000H", NULL },
        { "WDC AC22100H", NULL },
        { "WDC AC32500H", NULL },
        { "WDC AC33100H", NULL },
        { "WDC AC31600H", NULL },
        { "WDC AC32100H", "24.09P07" },
        { "WDC AC23200L", "21.10N21" },
        { "Compaq CRD-8241B", NULL },
        { "CRD-8400B", NULL },
        { "CRD-8480B", NULL },
        { "CRD-8482B", NULL },
        { "CRD-84", NULL },
        { "SanDisk SDP3B", NULL },
        { "SanDisk SDP3B-64", NULL },
        { "SANYO CD-ROM CRD", NULL },
        { "HITACHI CDR-8", NULL },
        { "HITACHI CDR-8335", NULL },
        { "HITACHI CDR-8435", NULL },
        { "Toshiba CD-ROM XM-6202B", NULL },
        { "TOSHIBA CD-ROM XM-1702BC", NULL },
        { "CD-532E-A", NULL },
        { "E-IDE CD-ROM CR-840", NULL },
        { "CD-ROM Drive/F5A", NULL },
        { "WPI CDD-820", NULL },
        { "SAMSUNG CD-ROM SC-148C", NULL },
        { "SAMSUNG CD-ROM SC", NULL },
        { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL },
        { "_NEC DV5800A", NULL },
        { "SAMSUNG CD-ROM SN-124", "N001" },
        { "Seagate STT20000A", NULL },
        { "CD-ROM CDR_U200", "1.09" },
        { NULL, NULL }

};

/**
 * ide_dma_intr - IDE DMA interrupt handler
 * @drive: the drive the interrupt is for
 *
 * Handle an interrupt completing a read/write DMA transfer on an
 * IDE device
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 stat = 0, dma_stat = 0;

        dma_stat = hwif->dma_ops->dma_end(drive);
        stat = hwif->tp_ops->read_status(hwif);

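        /*
         * Note (added for clarity): OK_STAT(stat, good, bad), defined in
         * <linux/ide.h>, succeeds only when every 'good' bit is set and no
         * 'bad' bit is set in 'stat'.  Here that means the drive reports
         * DRIVE_READY with none of the drive->bad_wstat error bits and no
         * leftover ATA_DRQ from the transfer.
         */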
        if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
                if (!dma_stat) {
                        struct ide_cmd *cmd = &hwif->cmd;

                        ide_finish_cmd(drive, cmd, stat);
                        return ide_stopped;
                }
                printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
                        drive->name, __func__, dma_stat);
        }
        return ide_error(drive, "dma_intr", stat);
}
EXPORT_SYMBOL_GPL(ide_dma_intr);

int ide_dma_good_drive(ide_drive_t *drive)
{
        return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 * ide_build_sglist - map IDE scatter gather for DMA I/O
 * @drive: the drive to build the DMA table for
 * @rq: the request holding the sg list
 *
 * Perform the DMA mapping magic necessary to access the source or
 * target buffers of a request via DMA.  The lower layers of the
 * kernel provide the necessary cache management so that we can
 * operate in a portable fashion.
 */

int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
        int i;

        ide_map_sg(drive, rq);

        if (rq_data_dir(rq) == READ)
                hwif->sg_dma_direction = DMA_FROM_DEVICE;
        else
                hwif->sg_dma_direction = DMA_TO_DEVICE;

        i = dma_map_sg(hwif->dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
        if (i == 0)
                ide_map_sg(drive, rq);
        else {
                hwif->orig_sg_nents = hwif->sg_nents;
                hwif->sg_nents = i;
        }

        return i;
}

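/*
 * A minimal sketch (not part of this file, identifiers hypothetical) of how
 * a host driver's dma_setup() method is typically expected to use the helper
 * above: map the request with ide_build_sglist(), program the controller's
 * PRD/descriptor table from hwif->sg_table, and fall back to PIO when the
 * mapping fails.  Real drivers (e.g. the SFF BM-DMA code) build the table
 * from each scatterlist entry's sg_dma_address()/sg_dma_len().
 *
 *      static int example_dma_setup(ide_drive_t *drive)       // hypothetical
 *      {
 *              ide_hwif_t *hwif = drive->hwif;
 *              struct request *rq = hwif->rq;
 *
 *              if (ide_build_sglist(drive, rq) == 0)
 *                      return 1;       // nothing mapped: caller retries in PIO
 *
 *              // ... program the controller from hwif->sg_table here ...
 *              // on any failure: ide_destroy_dmatable(drive); return 1;
 *
 *              return 0;               // DMA programmed, engine may be started
 *      }
 */
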
/**
 * ide_destroy_dmatable - clean up DMA mapping
 * @drive: The drive to unmap
 *
 * Teardown mappings after DMA has completed. This must be called
 * after the completion of each use of ide_build_dmatable and before
 * the next use of ide_build_dmatable. Failure to do so will cause
 * an oops as only one mapping can be live for each target at a given
 * time.
 */

void ide_destroy_dmatable(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;

        dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->orig_sg_nents,
                     hwif->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_destroy_dmatable);

/**
 * ide_dma_off_quietly - Generic DMA kill
 * @drive: drive to control
 *
 * Turn off the current DMA on this IDE controller.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
        drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
        ide_toggle_bounce(drive, 0);

        drive->hwif->dma_ops->dma_host_set(drive, 0);
}
EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 * ide_dma_off - disable DMA on a device
 * @drive: drive to disable DMA on
 *
 * Disable IDE DMA for a device on this IDE controller.
 * Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
        printk(KERN_INFO "%s: DMA disabled\n", drive->name);
        ide_dma_off_quietly(drive);
}
EXPORT_SYMBOL(ide_dma_off);

/**
 * ide_dma_on - Enable DMA on a device
 * @drive: drive to enable DMA on
 *
 * Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
        drive->dev_flags |= IDE_DFLAG_USING_DMA;
        ide_toggle_bounce(drive, 1);

        drive->hwif->dma_ops->dma_host_set(drive, 1);
}

int __ide_dma_bad_drive(ide_drive_t *drive)
{
        u16 *id = drive->id;

        int blacklist = ide_in_drive_list(id, drive_blacklist);
        if (blacklist) {
                printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
                                    drive->name, (char *)&id[ATA_ID_PROD]);
                return blacklist;
        }
        return 0;
}
EXPORT_SYMBOL(__ide_dma_bad_drive);

static const u8 xfer_mode_bases[] = {
        XFER_UDMA_0,
        XFER_MW_DMA_0,
        XFER_SW_DMA_0,
};

static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
        u16 *id = drive->id;
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_port_ops *port_ops = hwif->port_ops;
        unsigned int mask = 0;

        switch (base) {
        case XFER_UDMA_0:
                if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
                        break;

                if (port_ops && port_ops->udma_filter)
                        mask = port_ops->udma_filter(drive);
                else
                        mask = hwif->ultra_mask;
                mask &= id[ATA_ID_UDMA_MODES];

                /*
                 * avoid false cable warning from eighty_ninty_three()
                 */
                if (req_mode > XFER_UDMA_2) {
                        if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
                                mask &= 0x07;
                }
                break;
        case XFER_MW_DMA_0:
                if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
                        break;
                if (port_ops && port_ops->mdma_filter)
                        mask = port_ops->mdma_filter(drive);
                else
                        mask = hwif->mwdma_mask;
                mask &= id[ATA_ID_MWDMA_MODES];
                break;
        case XFER_SW_DMA_0:
                if (id[ATA_ID_FIELD_VALID] & 2) {
                        mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
                } else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
                        u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;

                        /*
                         * if the mode is valid convert it to the mask
                         * (the maximum allowed mode is XFER_SW_DMA_2)
                         */
                        if (mode <= 2)
                                mask = ((2 << mode) - 1) & hwif->swdma_mask;
                }
                break;
        default:
                BUG();
                break;
        }

        return mask;
}

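/*
 * Worked example for ide_get_mode_mask() (values illustrative): with a host
 * ultra_mask of 0x3f (UDMA0-5) and a drive advertising UDMA0-4
 * (id[ATA_ID_UDMA_MODES] = 0x1f), the resulting UDMA mask is 0x1f.  If a
 * mode above UDMA2 was requested but only a 40-wire cable was detected
 * (eighty_ninty_three() == 0), the "mask & 0x78" test (the UDMA3-6 bits)
 * fires and the mask is clipped to 0x07, i.e. UDMA0-2.  For the old
 * single-word DMA field, a reported mode of 2 yields ((2 << 2) - 1) = 0x07,
 * i.e. SW DMA modes 0-2, before being limited by hwif->swdma_mask.
 */
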
/**
 * ide_find_dma_mode - compute DMA speed
 * @drive: IDE device
 * @req_mode: requested mode
 *
 * Checks the drive/host capabilities and finds the speed to use for
 * the DMA transfer.  The speed is then limited by the requested mode.
 *
 * Returns 0 if the drive/host combination is incapable of DMA transfers
 * or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
        ide_hwif_t *hwif = drive->hwif;
        unsigned int mask;
        int x, i;
        u8 mode = 0;

        if (drive->media != ide_disk) {
                if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
                        return 0;
        }

        for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
                if (req_mode < xfer_mode_bases[i])
                        continue;
                mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
                x = fls(mask) - 1;
                if (x >= 0) {
                        mode = xfer_mode_bases[i] + x;
                        break;
                }
        }

        if (hwif->chipset == ide_acorn && mode == 0) {
                /*
                 * is this correct?
                 */
                if (ide_dma_good_drive(drive) &&
                    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
                        mode = XFER_MW_DMA_1;
        }

        mode = min(mode, req_mode);

        printk(KERN_INFO "%s: %s mode selected\n", drive->name,
                mode ? ide_xfer_verbose(mode) : "no DMA");

        return mode;
}
EXPORT_SYMBOL_GPL(ide_find_dma_mode);

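/*
 * Usage note (added, not authoritative): callers that simply want the
 * fastest mode the drive/host pair supports go through ide_max_dma_mode(),
 * which in kernels of this vintage is a thin wrapper that is believed to be
 * equivalent to ide_find_dma_mode(drive, XFER_UDMA_6); ide_tune_dma() below
 * uses it that way before programming the result with ide_set_dma_mode().
 */
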
static int ide_tune_dma(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 speed;

        if (ata_id_has_dma(drive->id) == 0 ||
            (drive->dev_flags & IDE_DFLAG_NODMA))
                return 0;

        /* consult the list of known "bad" drives */
        if (__ide_dma_bad_drive(drive))
                return 0;

        if (ide_id_dma_bug(drive))
                return 0;

        if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
                return config_drive_for_dma(drive);

        speed = ide_max_dma_mode(drive);

        if (!speed)
                return 0;

        if (ide_set_dma_mode(drive, speed))
                return 0;

        return 1;
}

static int ide_dma_check(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;

        if (ide_tune_dma(drive))
                return 0;

        /* TODO: always do PIO fallback */
        if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
                return -1;

        ide_set_max_pio(drive);

        return -1;
}

int ide_id_dma_bug(ide_drive_t *drive)
{
        u16 *id = drive->id;

        if (id[ATA_ID_FIELD_VALID] & 4) {
                if ((id[ATA_ID_UDMA_MODES] >> 8) &&
                    (id[ATA_ID_MWDMA_MODES] >> 8))
                        goto err_out;
        } else if (id[ATA_ID_FIELD_VALID] & 2) {
                if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
                    (id[ATA_ID_SWDMA_MODES] >> 8))
                        goto err_out;
        }
        return 0;
err_out:
        printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
        return 1;
}

int ide_set_dma(ide_drive_t *drive)
{
        int rc;

        /*
         * Force DMAing for the beginning of the check.
         * Some chipsets appear to do interesting
         * things, if not checked and cleared.
         *   PARANOIA!!!
         */
        ide_dma_off_quietly(drive);

        rc = ide_dma_check(drive);
        if (rc)
                return rc;

        ide_dma_on(drive);

        return 0;
}

void ide_check_dma_crc(ide_drive_t *drive)
{
        u8 mode;

        ide_dma_off_quietly(drive);
        drive->crc_count = 0;
        mode = drive->current_speed;
        /*
         * Don't try non Ultra-DMA modes without iCRC's.  Force the
         * device to PIO and make the user enable SWDMA/MWDMA modes.
         */
        if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
                mode--;
        else
                mode = XFER_PIO_4;
        ide_set_xfer_rate(drive, mode);
        if (drive->current_speed >= XFER_SW_DMA_0)
                ide_dma_on(drive);
}

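/*
 * Example of the fallback above: a drive running at XFER_UDMA_5 that keeps
 * accumulating CRC errors is retried at XFER_UDMA_4; a drive already at
 * XFER_UDMA_0 (or in a non-UDMA DMA mode) is dropped straight to XFER_PIO_4,
 * and because PIO modes sort below XFER_SW_DMA_0 the final ide_dma_on() is
 * skipped, leaving DMA off until it is explicitly re-enabled.
 */
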
void ide_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}
EXPORT_SYMBOL_GPL(ide_dma_lost_irq);

void ide_dma_timeout(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;

        printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

        if (hwif->dma_ops->dma_test_irq(drive))
                return;

        ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));

        hwif->dma_ops->dma_end(drive);
}
EXPORT_SYMBOL_GPL(ide_dma_timeout);

/*
 * un-busy the port etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
        ide_startstop_t ret = ide_stopped;

        /*
         * end current dma transaction
         */

        if (error < 0) {
                printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
                (void)hwif->dma_ops->dma_end(drive);
                ret = ide_error(drive, "dma timeout error",
                                hwif->tp_ops->read_status(hwif));
        } else {
                printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
                hwif->dma_ops->dma_timeout(drive);
        }

        /*
         * disable dma for now, but remember that we did so because of
         * a timeout -- we'll reenable after we finish this next request
         * (or rather the first chunk of it) in pio.
         */
        drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
        drive->retry_pio++;
        ide_dma_off_quietly(drive);

        /*
         * un-busy drive etc and make sure request is sane
         */

        rq = hwif->rq;
        if (!rq)
                goto out;

        hwif->rq = NULL;

        rq->errors = 0;

        if (!rq->bio)
                goto out;

        rq->sector = rq->bio->bi_sector;
        rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->buffer = bio_data(rq->bio);
out:
        return ret;
}

void ide_release_dma_engine(ide_hwif_t *hwif)
{
        if (hwif->dmatable_cpu) {
                int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

                dma_free_coherent(hwif->dev, prd_size,
                                  hwif->dmatable_cpu, hwif->dmatable_dma);
                hwif->dmatable_cpu = NULL;
        }
}
EXPORT_SYMBOL_GPL(ide_release_dma_engine);

int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
        int prd_size;

        if (hwif->prd_max_nents == 0)
                hwif->prd_max_nents = PRD_ENTRIES;
        if (hwif->prd_ent_size == 0)
                hwif->prd_ent_size = PRD_BYTES;

        prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

        hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
                                                &hwif->dmatable_dma,
                                                GFP_ATOMIC);
        if (hwif->dmatable_cpu == NULL) {
                printk(KERN_ERR "%s: unable to allocate PRD table\n",
                        hwif->name);
                return -ENOMEM;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
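
/*
 * Sizing note for ide_allocate_dma_engine() (added, values assumed from the
 * defaults in <linux/ide.h> at the time of writing): hosts that do not
 * override prd_max_nents/prd_ent_size get PRD_ENTRIES descriptors of
 * PRD_BYTES each -- with the usual 256 x 8 that is a 2 KiB table per port --
 * allocated as a single coherent buffer so the bus-master engine can fetch
 * descriptors without explicit cache maintenance.
 */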