/*
 *  sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
 *
 *  Maintained by:  Jeremy Higdon @ SGI
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2004 SGI
 *
 *  Bits from Jeff Garzik, Copyright RedHat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Vitesse hardware documentation presumably available under NDA.
 *  Intel 31244 (same hardware interface) documentation presumably
 *  available from http://developer.intel.com/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME        "sata_vsc"
#define DRV_VERSION     "2.0"

enum {
        /* Interrupt register offsets (from chip base address) */
        VSC_SATA_INT_STAT_OFFSET        = 0x00,
        VSC_SATA_INT_MASK_OFFSET        = 0x04,

        /* Taskfile registers offsets */
        VSC_SATA_TF_CMD_OFFSET          = 0x00,
        VSC_SATA_TF_DATA_OFFSET         = 0x00,
        VSC_SATA_TF_ERROR_OFFSET        = 0x04,
        VSC_SATA_TF_FEATURE_OFFSET      = 0x06,
        VSC_SATA_TF_NSECT_OFFSET        = 0x08,
        VSC_SATA_TF_LBAL_OFFSET         = 0x0c,
        VSC_SATA_TF_LBAM_OFFSET         = 0x10,
        VSC_SATA_TF_LBAH_OFFSET         = 0x14,
        VSC_SATA_TF_DEVICE_OFFSET       = 0x18,
        VSC_SATA_TF_STATUS_OFFSET       = 0x1c,
        VSC_SATA_TF_COMMAND_OFFSET      = 0x1d,
        VSC_SATA_TF_ALTSTATUS_OFFSET    = 0x28,
        VSC_SATA_TF_CTL_OFFSET          = 0x29,

        /* DMA base */
        VSC_SATA_UP_DESCRIPTOR_OFFSET   = 0x64,
        VSC_SATA_UP_DATA_BUFFER_OFFSET  = 0x6C,
        VSC_SATA_DMA_CMD_OFFSET         = 0x70,

        /* SCRs base */
        VSC_SATA_SCR_STATUS_OFFSET      = 0x100,
        VSC_SATA_SCR_ERROR_OFFSET       = 0x104,
        VSC_SATA_SCR_CONTROL_OFFSET     = 0x108,

        /* Port stride */
        VSC_SATA_PORT_OFFSET            = 0x200,

        /* Error interrupt status bit offsets */
        VSC_SATA_INT_ERROR_CRC          = 0x40,
        VSC_SATA_INT_ERROR_T            = 0x20,
        VSC_SATA_INT_ERROR_P            = 0x10,
        VSC_SATA_INT_ERROR_R            = 0x8,
        VSC_SATA_INT_ERROR_E            = 0x4,
        VSC_SATA_INT_ERROR_M            = 0x2,
        VSC_SATA_INT_PHY_CHANGE         = 0x1,
        VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC  | VSC_SATA_INT_ERROR_T | \
                              VSC_SATA_INT_ERROR_P    | VSC_SATA_INT_ERROR_R | \
                              VSC_SATA_INT_ERROR_E    | VSC_SATA_INT_ERROR_M | \
                              VSC_SATA_INT_PHY_CHANGE),

        /* Host private flags (hp_flags) */
        VSC_SATA_HP_FLAG_MSI            = (1 << 0),
};

struct vsc_sata_host_priv {
        u32                     hp_flags;
};

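/*
 * The chip reports all four ports in one 32-bit interrupt status word:
 * port N owns bits 8*N..8*N+7, so a port's error bits are tested by
 * shifting VSC_SATA_INT_ERROR into that port's byte.
 */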
#define is_vsc_sata_int_err(port_idx, int_status) \
        (int_status & (VSC_SATA_INT_ERROR << (8 * port_idx)))


static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
        if (sc_reg > SCR_CONTROL)
                return 0xffffffffU;
        return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
}


static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
                                u32 val)
{
        if (sc_reg > SCR_CONTROL)
                return;
        writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
}


static void vsc_sata_host_stop(struct ata_host *host)
{
        struct vsc_sata_host_priv *hpriv = host->private_data;
        struct pci_dev *pdev = to_pci_dev(host->dev);

        if (hpriv->hp_flags & VSC_SATA_HP_FLAG_MSI)
                pci_disable_msi(pdev);
        else
                pci_intx(pdev, 0);
        kfree(hpriv);
        ata_pci_host_stop(host);
}


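/*
 * Each port also has a one-byte interrupt mask at
 * VSC_SATA_INT_MASK_OFFSET + port_no; the helper below mirrors the
 * taskfile ATA_NIEN (interrupt disable) bit into bit 7 of that byte.
 */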
static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
{
        void __iomem *mask_addr;
        u8 mask;

        mask_addr = ap->host->mmio_base +
                VSC_SATA_INT_MASK_OFFSET + ap->port_no;
        mask = readb(mask_addr);
        if (ctl & ATA_NIEN)
                mask |= 0x80;
        else
                mask &= 0x7F;
        writeb(mask, mask_addr);
}


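/*
 * The taskfile data/address registers are accessed with 16-bit
 * readw()/writew(), which lets tf_load and tf_read move the low byte and
 * the matching HOB byte of an LBA48 command in a single register access.
 */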
static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

        /*
         * The only thing the ctl register is used for is SRST.
         * That is not enabled or disabled via tf_load.
         * However, if ATA_NIEN is changed, then we need to change
         * the interrupt register.
         */
        if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
                ap->last_ctl = tf->ctl;
                vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
        }
        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
                writew(tf->feature | (((u16)tf->hob_feature) << 8),
                       (void __iomem *) ioaddr->feature_addr);
                writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
                       (void __iomem *) ioaddr->nsect_addr);
                writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
                       (void __iomem *) ioaddr->lbal_addr);
                writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
                       (void __iomem *) ioaddr->lbam_addr);
                writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
                       (void __iomem *) ioaddr->lbah_addr);
        } else if (is_addr) {
                writew(tf->feature, (void __iomem *) ioaddr->feature_addr);
                writew(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
                writew(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
                writew(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
                writew(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                writeb(tf->device, (void __iomem *) ioaddr->device_addr);

        ata_wait_idle(ap);
}


static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u16 nsect, lbal, lbam, lbah, feature;

        tf->command = ata_check_status(ap);
        tf->device = readw((void __iomem *) ioaddr->device_addr);
        feature = readw((void __iomem *) ioaddr->error_addr);
        nsect = readw((void __iomem *) ioaddr->nsect_addr);
        lbal = readw((void __iomem *) ioaddr->lbal_addr);
        lbam = readw((void __iomem *) ioaddr->lbam_addr);
        lbah = readw((void __iomem *) ioaddr->lbah_addr);

        tf->feature = feature;
        tf->nsect = nsect;
        tf->lbal = lbal;
        tf->lbam = lbam;
        tf->lbah = lbah;

        if (tf->flags & ATA_TFLAG_LBA48) {
                tf->hob_feature = feature >> 8;
                tf->hob_nsect = nsect >> 8;
                tf->hob_lbal = lbal >> 8;
                tf->hob_lbam = lbam >> 8;
                tf->hob_lbah = lbah >> 8;
        }
}


/*
 * vsc_sata_interrupt
 *
 * Read the interrupt register and process for the devices that have
 * them pending.
 */
static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        u32 int_status;

        spin_lock(&host->lock);

        int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET);

        for (i = 0; i < host->n_ports; i++) {
                if (int_status & ((u32) 0xFF << (8 * i))) {
                        struct ata_port *ap;

                        ap = host->ports[i];

                        if (is_vsc_sata_int_err(i, int_status)) {
                                u32 err_status;
                                printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
                                err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
                                vsc_sata_scr_write(ap, SCR_ERROR, err_status);
                                handled++;
                        }

                        if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                                struct ata_queued_cmd *qc;

                                qc = ata_qc_from_tag(ap, ap->active_tag);
                                if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                                        handled += ata_host_intr(ap, qc);
                                else if (is_vsc_sata_int_err(i, int_status)) {
                                        /*
                                         * On some chips (i.e. Intel 31244), an error
                                         * interrupt will sneak in at initialization
                                         * time (phy state changes).  Clearing the SCR
                                         * error register is not required, but it prevents
                                         * the phy state change interrupts from recurring
                                         * later.
                                         */
                                        u32 err_status;
                                        err_status = vsc_sata_scr_read(ap, SCR_ERROR);
                                        printk(KERN_DEBUG "%s: clearing interrupt, "
                                               "status %x; sata err status %x\n",
                                               __FUNCTION__,
                                               int_status, err_status);
                                        vsc_sata_scr_write(ap, SCR_ERROR, err_status);
                                        /* Clear interrupt status */
                                        ata_chk_status(ap);
                                        handled++;
                                }
                        }
                }
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}


static struct scsi_host_template vsc_sata_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};


static const struct ata_port_operations vsc_sata_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = vsc_sata_tf_load,
        .tf_read                = vsc_sata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .data_xfer              = ata_mmio_data_xfer,
        .freeze                 = ata_bmdma_freeze,
        .thaw                   = ata_bmdma_thaw,
        .error_handler          = ata_bmdma_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .irq_handler            = vsc_sata_interrupt,
        .irq_clear              = ata_bmdma_irq_clear,
        .scr_read               = vsc_sata_scr_read,
        .scr_write              = vsc_sata_scr_write,
        .port_start             = ata_port_start,
        .port_stop              = ata_port_stop,
        .host_stop              = vsc_sata_host_stop,
};

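/*
 * Per-port register blocks are VSC_SATA_PORT_OFFSET (0x200) bytes apart.
 * The probe routine maps port 0 at base + 1 * VSC_SATA_PORT_OFFSET, which
 * presumably leaves the first 0x200 bytes of the BAR for the chip-global
 * interrupt status/mask registers.
 */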
static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
{
        port->cmd_addr          = base + VSC_SATA_TF_CMD_OFFSET;
        port->data_addr         = base + VSC_SATA_TF_DATA_OFFSET;
        port->error_addr        = base + VSC_SATA_TF_ERROR_OFFSET;
        port->feature_addr      = base + VSC_SATA_TF_FEATURE_OFFSET;
        port->nsect_addr        = base + VSC_SATA_TF_NSECT_OFFSET;
        port->lbal_addr         = base + VSC_SATA_TF_LBAL_OFFSET;
        port->lbam_addr         = base + VSC_SATA_TF_LBAM_OFFSET;
        port->lbah_addr         = base + VSC_SATA_TF_LBAH_OFFSET;
        port->device_addr       = base + VSC_SATA_TF_DEVICE_OFFSET;
        port->status_addr       = base + VSC_SATA_TF_STATUS_OFFSET;
        port->command_addr      = base + VSC_SATA_TF_COMMAND_OFFSET;
        port->altstatus_addr    = base + VSC_SATA_TF_ALTSTATUS_OFFSET;
        port->ctl_addr          = base + VSC_SATA_TF_CTL_OFFSET;
        port->bmdma_addr        = base + VSC_SATA_DMA_CMD_OFFSET;
        port->scr_addr          = base + VSC_SATA_SCR_STATUS_OFFSET;
        writel(0, (void __iomem *) base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
        writel(0, (void __iomem *) base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
}


static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        struct ata_probe_ent *probe_ent = NULL;
        struct vsc_sata_host_priv *hpriv;
        unsigned long base;
        int pci_dev_busy = 0;
        void __iomem *mmio_base;
        int rc;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        /*
         * Check if we have needed resource mapped.
         */
        if (pci_resource_len(pdev, 0) == 0) {
                rc = -ENODEV;
                goto err_out;
        }

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
                pci_dev_busy = 1;
                goto err_out;
        }

        /*
         * Use 32 bit DMA mask, because 64 bit address support is poor.
         */
        rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
        if (rc)
                goto err_out_regions;
        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
        if (rc)
                goto err_out_regions;

        probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
        if (probe_ent == NULL) {
                rc = -ENOMEM;
                goto err_out_regions;
        }

        memset(probe_ent, 0, sizeof(*probe_ent));
        probe_ent->dev = pci_dev_to_dev(pdev);
        INIT_LIST_HEAD(&probe_ent->node);

        mmio_base = pci_iomap(pdev, 0, 0);
        if (mmio_base == NULL) {
                rc = -ENOMEM;
                goto err_out_free_ent;
        }
        base = (unsigned long) mmio_base;

        hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv) {
                rc = -ENOMEM;
                goto err_out_iounmap;
        }
        memset(hpriv, 0, sizeof(*hpriv));

        /*
         * Due to a bug in the chip, the default cache line size can't be used
         */
        pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);

        if (pci_enable_msi(pdev) == 0) {
                hpriv->hp_flags |= VSC_SATA_HP_FLAG_MSI;
                pci_intx(pdev, 0);
        } else
                probe_ent->irq_flags = IRQF_SHARED;

        probe_ent->sht = &vsc_sata_sht;
        probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                ATA_FLAG_MMIO;
        probe_ent->port_ops = &vsc_sata_ops;
        probe_ent->n_ports = 4;
        probe_ent->irq = pdev->irq;
        probe_ent->mmio_base = mmio_base;
        probe_ent->private_data = hpriv;

        /* We don't care much about the PIO/UDMA masks, but the core won't like us
         * if we don't fill these
         */
        probe_ent->pio_mask = 0x1f;
        probe_ent->mwdma_mask = 0x07;
        probe_ent->udma_mask = 0x7f;

        /* We have 4 ports per PCI function */
        vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET);
        vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET);
        vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET);
        vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET);

        pci_set_master(pdev);

        /*
         * Config offset 0x98 is "Extended Control and Status Register 0"
         * Default value is (1 << 28).  All bits except bit 28 are reserved in
         * DPA mode.  If bit 28 is set, LED 0 reflects all ports' activity.
         * If bit 28 is clear, each port has its own LED.
         */
        pci_write_config_dword(pdev, 0x98, 0);

        /* FIXME: check ata_device_add return value */
        ata_device_add(probe_ent);

        kfree(probe_ent);
        return 0;

err_out_iounmap:
        pci_iounmap(pdev, mmio_base);
err_out_free_ent:
        kfree(probe_ent);
err_out_regions:
        pci_release_regions(pdev);
err_out:
        if (!pci_dev_busy)
                pci_disable_device(pdev);
        return rc;
}

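/*
 * Besides the vendor/device IDs, both entries match on PCI class 0x010600
 * (mass storage, SATA) with a full class mask, so only functions that
 * advertise a SATA class code are claimed.
 */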
static const struct pci_device_id vsc_sata_pci_tbl[] = {
        { PCI_VENDOR_ID_VITESSE, 0x7174,
          PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
        { PCI_VENDOR_ID_INTEL, 0x3200,
          PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },

        { }     /* terminate list */
};

static struct pci_driver vsc_sata_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = vsc_sata_pci_tbl,
        .probe                  = vsc_sata_init_one,
        .remove                 = ata_pci_remove_one,
};

static int __init vsc_sata_init(void)
{
        return pci_register_driver(&vsc_sata_pci_driver);
}

static void __exit vsc_sata_exit(void)
{
        pci_unregister_driver(&vsc_sata_pci_driver);
}

MODULE_AUTHOR("Jeremy Higdon");
MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(vsc_sata_init);
module_exit(vsc_sata_exit);