/*
 * pata_icside.c - ICS PATA driver for Acorn expansion cards
 */
1 | #include <linux/kernel.h> |
2 | #include <linux/module.h> | |
3 | #include <linux/init.h> | |
4 | #include <linux/blkdev.h> | |
5 | #include <scsi/scsi_host.h> | |
6 | #include <linux/ata.h> | |
7 | #include <linux/libata.h> | |
8 | ||
9 | #include <asm/dma.h> | |
10 | #include <asm/ecard.h> | |
11 | ||
#define DRV_NAME "pata_icside"

/* Offset of the card identification bits within the IOC fast space */
#define ICS_IDENT_OFFSET		0x2280

/* v5 card interrupt control registers (single interface) */
#define ICS_ARCIN_V5_INTRSTAT		0x0000
#define ICS_ARCIN_V5_INTROFFSET		0x0004

/* v6 card interrupt control registers, one set per interface */
#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
23 | ||
/*
 * Register layout of one IDE interface on the card: where the taskfile
 * and control registers live, and how the registers are spaced.
 */
struct portinfo {
	unsigned int dataoffset;	/* base of the taskfile register block */
	unsigned int ctrloffset;	/* control/altstatus register offset */
	unsigned int stepping;		/* shift applied to ATA register numbers */
};
29 | ||
/* v5 cards carry a single interface */
static const struct portinfo pata_icside_portinfo_v5 = {
	.dataoffset = 0x2800,
	.ctrloffset = 0x2b80,
	.stepping = 6,
};

/* v6 cards carry two interfaces; first interface */
static const struct portinfo pata_icside_portinfo_v6_1 = {
	.dataoffset = 0x2000,
	.ctrloffset = 0x2380,
	.stepping = 6,
};

/* v6 cards, second interface */
static const struct portinfo pata_icside_portinfo_v6_2 = {
	.dataoffset = 0x3000,
	.ctrloffset = 0x3380,
	.stepping = 6,
};
47 | ||
#define PATA_ICSIDE_MAX_SG	128

/*
 * Driver-private state for one card, shared by both ports.
 */
struct pata_icside_state {
	void __iomem *irq_port;		/* base of the interrupt control registers */
	void __iomem *ioc_base;		/* IOC fast space mapping (v6 only) */
	unsigned int type;		/* ICS_TYPE_* card variant */
	unsigned int dma;		/* DMA channel number, or NO_DMA */
	struct {
		u8 port_sel;		/* value written to ioc_base to route DMA */
		u8 disabled;		/* set once this port has been shut down */
		unsigned int speed[ATA_MAX_DEVICES];	/* IOMD cycle time per device */
	} port[2];
	struct scatterlist sg[PATA_ICSIDE_MAX_SG];	/* contiguous copy of a qc's sg list */
};
62 | ||
/* Card variant codes, assembled from the identification bits */
#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)	/* identification area unreadable */
68 | ||
69 | /* ---------------- Version 5 PCB Support Functions --------------------- */ | |
70 | /* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr) | |
71 | * Purpose : enable interrupts from card | |
72 | */ | |
73 | static void pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr) | |
74 | { | |
75 | struct pata_icside_state *state = ec->irq_data; | |
76 | ||
77 | writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET); | |
78 | } | |
79 | ||
80 | /* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr) | |
81 | * Purpose : disable interrupts from card | |
82 | */ | |
83 | static void pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr) | |
84 | { | |
85 | struct pata_icside_state *state = ec->irq_data; | |
86 | ||
87 | readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET); | |
88 | } | |
89 | ||
/* Expansion card interrupt operations for v5 cards */
static const expansioncard_ops_t pata_icside_ops_arcin_v5 = {
	.irqenable	= pata_icside_irqenable_arcin_v5,
	.irqdisable	= pata_icside_irqdisable_arcin_v5,
};
94 | ||
95 | ||
96 | /* ---------------- Version 6 PCB Support Functions --------------------- */ | |
97 | /* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr) | |
98 | * Purpose : enable interrupts from card | |
99 | */ | |
100 | static void pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr) | |
101 | { | |
102 | struct pata_icside_state *state = ec->irq_data; | |
103 | void __iomem *base = state->irq_port; | |
104 | ||
105 | if (!state->port[0].disabled) | |
106 | writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); | |
107 | if (!state->port[1].disabled) | |
108 | writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); | |
109 | } | |
110 | ||
111 | /* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) | |
112 | * Purpose : disable interrupts from card | |
113 | */ | |
114 | static void pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) | |
115 | { | |
116 | struct pata_icside_state *state = ec->irq_data; | |
117 | ||
118 | readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); | |
119 | readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); | |
120 | } | |
121 | ||
122 | /* Prototype: pata_icside_irqprobe(struct expansion_card *ec) | |
123 | * Purpose : detect an active interrupt from card | |
124 | */ | |
125 | static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec) | |
126 | { | |
127 | struct pata_icside_state *state = ec->irq_data; | |
128 | ||
129 | return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 || | |
130 | readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1; | |
131 | } | |
132 | ||
/* Expansion card interrupt operations for v6 cards */
static const expansioncard_ops_t pata_icside_ops_arcin_v6 = {
	.irqenable	= pata_icside_irqenable_arcin_v6,
	.irqdisable	= pata_icside_irqdisable_arcin_v6,
	.irqpending	= pata_icside_irqpending_arcin_v6,
};
138 | ||
139 | ||
140 | /* | |
141 | * SG-DMA support. | |
142 | * | |
143 | * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers. | |
144 | * There is only one DMA controller per card, which means that only | |
145 | * one drive can be accessed at one time. NOTE! We do not enforce that | |
146 | * here, but we rely on the main IDE driver spotting that both | |
147 | * interfaces use the same IRQ, which should guarantee this. | |
148 | */ | |
149 | ||
150 | /* | |
151 | * Configure the IOMD to give the appropriate timings for the transfer | |
152 | * mode being requested. We take the advice of the ATA standards, and | |
153 | * calculate the cycle time based on the transfer mode, and the EIDE | |
154 | * MW DMA specs that the drive provides in the IDENTIFY command. | |
155 | * | |
156 | * We have the following IOMD DMA modes to choose from: | |
157 | * | |
158 | * Type Active Recovery Cycle | |
159 | * A 250 (250) 312 (550) 562 (800) | |
160 | * B 187 (200) 250 (550) 437 (750) | |
161 | * C 125 (125) 125 (375) 250 (500) | |
162 | * D 62 (50) 125 (375) 187 (425) | |
163 | * | |
164 | * (figures in brackets are actual measured timings on DIOR/DIOW) | |
165 | * | |
166 | * However, we also need to take care of the read/write active and | |
167 | * recovery timings: | |
168 | * | |
169 | * Read Write | |
170 | * Mode Active -- Recovery -- Cycle IOMD type | |
171 | * MW0 215 50 215 480 A | |
172 | * MW1 80 50 50 150 C | |
173 | * MW2 70 25 25 120 C | |
174 | */ | |
175 | static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |
176 | { | |
177 | struct pata_icside_state *state = ap->host->private_data; | |
178 | struct ata_timing t; | |
179 | unsigned int cycle; | |
180 | char iomd_type; | |
181 | ||
182 | /* | |
183 | * DMA is based on a 16MHz clock | |
184 | */ | |
185 | if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1)) | |
186 | return; | |
187 | ||
188 | /* | |
189 | * Choose the IOMD cycle timing which ensure that the interface | |
190 | * satisfies the measured active, recovery and cycle times. | |
191 | */ | |
192 | if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425) | |
193 | iomd_type = 'D', cycle = 187; | |
194 | else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500) | |
195 | iomd_type = 'C', cycle = 250; | |
196 | else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750) | |
197 | iomd_type = 'B', cycle = 437; | |
198 | else | |
199 | iomd_type = 'A', cycle = 562; | |
200 | ||
201 | ata_dev_printk(adev, KERN_INFO, "timings: act %dns rec %dns cyc %dns (%c)\n", | |
202 | t.active, t.recover, t.cycle, iomd_type); | |
203 | ||
204 | state->port[ap->port_no].speed[adev->devno] = cycle; | |
205 | } | |
206 | ||
/*
 * Prepare an IOMD DMA transfer for the queued command and issue the
 * taskfile.  The transfer itself is kicked off later by
 * pata_icside_bmdma_start.
 */
static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_icside_state *state = ap->host->private_data;
	struct scatterlist *sg, *rsg = state->sg;
	unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;

	/*
	 * We are simplex; BUG if we try to fiddle with DMA
	 * while it's active.
	 */
	BUG_ON(dma_channel_active(state->dma));

	/*
	 * Copy ATAs scattered sg list into a contiguous array of sg
	 */
	ata_for_each_sg(sg, qc) {
		memcpy(rsg, sg, sizeof(*sg));
		rsg++;
	}

	/*
	 * Route the DMA signals to the correct interface
	 */
	writeb(state->port[ap->port_no].port_sel, state->ioc_base);

	/* program cycle time chosen by set_dmamode, sg list and direction */
	set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
	set_dma_sg(state->dma, state->sg, rsg - state->sg);
	set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
240 | ||
241 | static void pata_icside_bmdma_start(struct ata_queued_cmd *qc) | |
242 | { | |
243 | struct ata_port *ap = qc->ap; | |
244 | struct pata_icside_state *state = ap->host->private_data; | |
245 | ||
246 | BUG_ON(dma_channel_active(state->dma)); | |
247 | enable_dma(state->dma); | |
248 | } | |
249 | ||
250 | static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc) | |
251 | { | |
252 | struct ata_port *ap = qc->ap; | |
253 | struct pata_icside_state *state = ap->host->private_data; | |
254 | ||
255 | disable_dma(state->dma); | |
256 | ||
257 | /* see ata_bmdma_stop */ | |
258 | ata_altstatus(ap); | |
259 | } | |
260 | ||
261 | static u8 pata_icside_bmdma_status(struct ata_port *ap) | |
262 | { | |
263 | struct pata_icside_state *state = ap->host->private_data; | |
264 | void __iomem *irq_port; | |
265 | ||
266 | irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 : | |
267 | ICS_ARCIN_V6_INTRSTAT_1); | |
268 | ||
269 | return readb(irq_port) & 1 ? ATA_DMA_INTR : 0; | |
270 | } | |
271 | ||
272 | static int icside_dma_init(struct ata_probe_ent *ae, struct expansion_card *ec) | |
273 | { | |
274 | struct pata_icside_state *state = ae->private_data; | |
275 | int i; | |
276 | ||
277 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | |
278 | state->port[0].speed[i] = 480; | |
279 | state->port[1].speed[i] = 480; | |
280 | } | |
281 | ||
282 | if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { | |
283 | state->dma = ec->dma; | |
284 | ae->mwdma_mask = 0x07; /* MW0..2 */ | |
285 | } | |
286 | ||
287 | return 0; | |
288 | } | |
289 | ||
290 | ||
/*
 * Per-port setup: the IOMD DMA engine has no PRD table, so only the
 * libata pad buffer needs allocating.
 */
static int pata_icside_port_start(struct ata_port *ap)
{
	/* No PRD to alloc */
	return ata_pad_alloc(ap, ap->dev);
}
296 | ||
/* SCSI host template: standard libata glue plus our sg table size */
static struct scsi_host_template pata_icside_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= PATA_ICSIDE_MAX_SG,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ~0, /* no dma boundaries */
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
314 | ||
/* wish this was exported from libata-core */
static void ata_dummy_noret(struct ata_port *port)
{
	/* intentionally empty - used where a callback must exist */
}
319 | ||
320 | /* | |
321 | * We need to shut down unused ports to prevent spurious interrupts. | |
322 | * FIXME: the libata core doesn't call this function for PATA interfaces. | |
323 | */ | |
static void pata_icside_port_disable(struct ata_port *ap)
{
	struct pata_icside_state *state = ap->host->private_data;

	ata_port_printk(ap, KERN_ERR, "disabling icside port\n");

	ata_port_disable(ap);

	/* remembered so irqenable_arcin_v6 leaves this port masked */
	state->port[ap->port_no].disabled = 1;

	if (state->type == ICS_TYPE_V6) {
		/*
		 * Disable interrupts from this port, otherwise we
		 * receive spurious interrupts from the floating
		 * interrupt line.
		 */
		void __iomem *irq_port = state->irq_port +
				(ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 : ICS_ARCIN_V6_INTROFFSET_1);
		readb(irq_port);
	}
}
345 | ||
346 | static u8 pata_icside_irq_ack(struct ata_port *ap, unsigned int chk_drq) | |
347 | { | |
348 | unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY; | |
349 | u8 status; | |
350 | ||
351 | status = ata_busy_wait(ap, bits, 1000); | |
352 | if (status & bits) | |
353 | if (ata_msg_err(ap)) | |
354 | printk(KERN_ERR "abnormal status 0x%X\n", status); | |
355 | ||
356 | if (ata_msg_intr(ap)) | |
357 | printk(KERN_INFO "%s: irq ack: drv_stat 0x%X\n", | |
358 | __FUNCTION__, status); | |
359 | ||
360 | return status; | |
361 | } | |
362 | ||
/*
 * libata port operations: standard taskfile handling, with the DMA
 * callbacks replaced by our IOMD-based implementations.
 */
static struct ata_port_operations pata_icside_port_ops = {
	.port_disable		= pata_icside_port_disable,

	.set_dmamode		= pata_icside_set_dmamode,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,

	.bmdma_setup		= pata_icside_bmdma_setup,
	.bmdma_start		= pata_icside_bmdma_start,

	.data_xfer		= ata_data_xfer_noirq,

	/* no need to build any PRD tables for DMA */
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_qc_issue_prot,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= pata_icside_bmdma_stop,

	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_dummy_noret,
	.irq_on			= ata_irq_on,
	.irq_ack		= pata_icside_irq_ack,

	.port_start		= pata_icside_port_start,

	.bmdma_stop		= pata_icside_bmdma_stop,
	.bmdma_status		= pata_icside_bmdma_status,
};
398 | ||
399 | static void | |
400 | pata_icside_add_port(struct ata_probe_ent *ae, void __iomem *base, | |
401 | const struct portinfo *info) | |
402 | { | |
403 | struct ata_ioports *ioaddr = &ae->port[ae->n_ports++]; | |
404 | void __iomem *cmd = base + info->dataoffset; | |
405 | ||
406 | ioaddr->cmd_addr = cmd; | |
407 | ioaddr->data_addr = cmd + (ATA_REG_DATA << info->stepping); | |
408 | ioaddr->error_addr = cmd + (ATA_REG_ERR << info->stepping); | |
409 | ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << info->stepping); | |
410 | ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << info->stepping); | |
411 | ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << info->stepping); | |
412 | ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << info->stepping); | |
413 | ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << info->stepping); | |
414 | ioaddr->device_addr = cmd + (ATA_REG_DEVICE << info->stepping); | |
415 | ioaddr->status_addr = cmd + (ATA_REG_STATUS << info->stepping); | |
416 | ioaddr->command_addr = cmd + (ATA_REG_CMD << info->stepping); | |
417 | ||
418 | ioaddr->ctl_addr = base + info->ctrloffset; | |
419 | ioaddr->altstatus_addr = ioaddr->ctl_addr; | |
420 | } | |
421 | ||
/*
 * Map and register a v5 card: single interface, interrupt control
 * registers at the start of the MEMC region.  Returns 0 or -ENOMEM.
 */
static int __init
pata_icside_register_v5(struct ata_probe_ent *ae, struct expansion_card *ec)
{
	struct pata_icside_state *state = ae->private_data;
	void __iomem *base;

	base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
		       ecard_resource_len(ec, ECARD_RES_MEMC));
	if (!base)
		return -ENOMEM;

	state->irq_port = base;

	/* let the ecard layer poll/ack interrupts through our registers */
	ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
	ec->irqmask = 1;
	ec->irq_data = state;
	ec->ops = &pata_icside_ops_arcin_v5;

	/*
	 * Be on the safe side - disable interrupts
	 */
	ec->ops->irqdisable(ec, ec->irq);

	pata_icside_add_port(ae, base, &pata_icside_portinfo_v5);

	return 0;
}
449 | ||
/*
 * Map and register a v6 card: two interfaces, optionally accessed
 * through the EASI region, with per-port interrupt control and DMA.
 * Returns 0 on success or a negative errno; on error the mappings
 * made so far are undone via the goto cleanup chain.
 */
static int __init
pata_icside_register_v6(struct ata_probe_ent *ae, struct expansion_card *ec)
{
	struct pata_icside_state *state = ae->private_data;
	void __iomem *ioc_base, *easi_base;
	unsigned int sel = 0;
	int ret;

	ioc_base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCFAST),
			   ecard_resource_len(ec, ECARD_RES_IOCFAST));
	if (!ioc_base) {
		ret = -ENOMEM;
		goto out;
	}

	/* without EASI, the taskfile registers live in the IOC fast space */
	easi_base = ioc_base;

	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
		easi_base = ioremap(ecard_resource_start(ec, ECARD_RES_EASI),
				    ecard_resource_len(ec, ECARD_RES_EASI));
		if (!easi_base) {
			ret = -ENOMEM;
			goto unmap_slot;
		}

		/*
		 * Enable access to the EASI region.
		 */
		sel = 1 << 5;
	}

	writeb(sel, ioc_base);

	ec->irq_data = state;
	ec->ops = &pata_icside_ops_arcin_v6;

	state->irq_port = easi_base;
	state->ioc_base = ioc_base;
	/* bit 0 of the routing byte selects the second interface's DMA */
	state->port[0].port_sel = sel;
	state->port[1].port_sel = sel | 1;

	/*
	 * Be on the safe side - disable interrupts
	 */
	ec->ops->irqdisable(ec, ec->irq);

	/*
	 * Find and register the interfaces.
	 */
	pata_icside_add_port(ae, easi_base, &pata_icside_portinfo_v6_1);
	pata_icside_add_port(ae, easi_base, &pata_icside_portinfo_v6_2);

	/*
	 * FIXME: work around libata's aversion to calling port_disable.
	 * This permanently disables interrupts on port 0 - bad luck if
	 * you have a drive on that port.
	 */
	state->port[0].disabled = 1;

	return icside_dma_init(ae, ec);

unmap_slot:
	iounmap(ioc_base);
out:
	return ret;
}
516 | ||
/*
 * Probe an ICS expansion card: read the card variant from its
 * identification area, perform the variant-specific register setup,
 * then register the resulting ATA host.  Resources are released on
 * every failure path.
 */
static int __devinit
pata_icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct pata_icside_state *state;
	struct ata_probe_ent ae;
	void __iomem *idmem;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	state = kzalloc(sizeof(struct pata_icside_state), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto release;
	}

	state->type = ICS_TYPE_NOTYPE;
	state->dma = NO_DMA;

	/*
	 * The card type is one bit from each of four consecutive
	 * identification locations; if the region cannot be mapped we
	 * fall through with ICS_TYPE_NOTYPE and fail below.
	 */
	idmem = ioremap(ecard_resource_start(ec, ECARD_RES_IOCFAST),
			ecard_resource_len(ec, ECARD_RES_IOCFAST));
	if (idmem) {
		unsigned int type;

		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
		iounmap(idmem);

		state->type = type;
	}

	memset(&ae, 0, sizeof(ae));
	INIT_LIST_HEAD(&ae.node);
	ae.dev = &ec->dev;
	ae.port_ops = &pata_icside_port_ops;
	ae.sht = &pata_icside_sht;
	ae.pio_mask = 0x1f;
	ae.irq = ec->irq;
	ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
	ae._host_flags = ATA_HOST_SIMPLEX;	/* one DMA channel for both ports */
	ae.private_data = state;

	switch (state->type) {
	case ICS_TYPE_A3IN:
		dev_warn(&ec->dev, "A3IN unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_A3USER:
		dev_warn(&ec->dev, "A3USER unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_V5:
		ret = pata_icside_register_v5(&ae, ec);
		break;

	case ICS_TYPE_V6:
		ret = pata_icside_register_v6(&ae, ec);
		break;

	default:
		dev_warn(&ec->dev, "unknown interface type\n");
		ret = -ENODEV;
		break;
	}

	/* ata_device_add returns the number of ports registered */
	if (ret == 0)
		ret = ata_device_add(&ae) == 0 ? -ENODEV : 0;

	if (ret == 0)
		goto out;

	kfree(state);
release:
	ecard_release_resources(ec);
out:
	return ret;
}
600 | ||
/*
 * Quiesce the card for reboot: mask its interrupts, then reset the
 * ROM pointer (also called from pata_icside_remove).
 */
static void pata_icside_shutdown(struct expansion_card *ec)
{
	struct ata_host *host = ecard_get_drvdata(ec);
	unsigned long flags;

	/*
	 * Disable interrupts from this card. We need to do
	 * this before disabling EASI since we may be accessing
	 * this register via that region.
	 */
	local_irq_save(flags);
	if (ec->ops)
		ec->ops->irqdisable(ec, ec->irq);
	local_irq_restore(flags);

	/*
	 * Reset the ROM pointer so that we can read the ROM
	 * after a soft reboot. This also disables access to
	 * the IDE taskfile via the EASI region.
	 */
	if (host) {
		struct pata_icside_state *state = host->private_data;
		if (state->ioc_base)
			writeb(0, state->ioc_base);
	}
}
627 | ||
/*
 * Tear down a card: detach the ATA host, quiesce the hardware, then
 * free the DMA channel, iounmap the regions and release resources.
 */
static void __devexit pata_icside_remove(struct expansion_card *ec)
{
	struct ata_host *host = ecard_get_drvdata(ec);
	struct pata_icside_state *state = host->private_data;

	ata_host_detach(host);

	pata_icside_shutdown(ec);

	/*
	 * don't NULL out the drvdata - devres/libata wants it
	 * to free the ata_host structure.
	 */
	ec->ops = NULL;
	ec->irq_data = NULL;

	if (state->dma != NO_DMA)
		free_dma(state->dma);
	if (state->ioc_base)
		iounmap(state->ioc_base);
	/* when irq_port aliases ioc_base, avoid a double iounmap */
	if (state->ioc_base != state->irq_port)
		iounmap(state->irq_port);

	kfree(state);
	ecard_release_resources(ec);
}
654 | ||
/* Expansion cards handled by this driver; 0xffff/0xffff terminates */
static const struct ecard_id pata_icside_ids[] = {
	{ MANU_ICS,  PROD_ICS_IDE  },
	{ MANU_ICS2, PROD_ICS2_IDE },
	{ 0xffff, 0xffff }
};
660 | ||
/* Expansion card driver registration block */
static struct ecard_driver pata_icside_driver = {
	.probe		= pata_icside_probe,
	.remove		= __devexit_p(pata_icside_remove),
	.shutdown	= pata_icside_shutdown,
	.id_table	= pata_icside_ids,
	.drv = {
		.name	= DRV_NAME,
	},
};
670 | ||
/* Module entry: register the expansion card driver */
static int __init pata_icside_init(void)
{
	return ecard_register_driver(&pata_icside_driver);
}

/* Module exit: unregister the expansion card driver */
static void __exit pata_icside_exit(void)
{
	ecard_remove_driver(&pata_icside_driver);
}

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS PATA driver");

module_init(pata_icside_init);
module_exit(pata_icside_exit);