1 /************************************************************************
2 * Linux driver for *
3 * ICP vortex GmbH: GDT PCI Disk Array Controllers *
4 * Intel Corporation: Storage RAID Controllers *
5 * *
6 * gdth.c *
7 * Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner *
8 * Copyright (C) 2002-04 Intel Corporation *
9 * Copyright (C) 2003-06 Adaptec Inc. *
10 * <achim_leubner@adaptec.com> *
11 * *
12 * Additions/Fixes: *
13 * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com> *
14 * Johannes Dinner <johannes_dinner@adaptec.com> *
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published *
18 * by the Free Software Foundation; either version 2 of the License, *
19 * or (at your option) any later version. *
20 * *
21 * This program is distributed in the hope that it will be useful, *
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
24 * GNU General Public License for more details. *
25 * *
26 * You should have received a copy of the GNU General Public License *
27 * along with this kernel; if not, write to the Free Software *
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
29 * *
30 * Linux kernel 2.6.x supported *
31 * *
32 ************************************************************************/
33
34 /* All GDT Disk Array Controllers are fully supported by this driver.
35 * This includes the PCI SCSI Disk Array Controllers and the
36 * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
37 * list of all controller types.
38 *
39 * After the optional list of IRQ values, other possible
40 * command line options are:
41 * disable:Y disable driver
42 * disable:N enable driver
43 * reserve_mode:0 reserve no drives for the raw service
44 * reserve_mode:1 reserve all not init., removable drives
45 * reserve_mode:2 reserve all not init. drives
46 * reserve_list:h,b,t,l,h,b,t,l,... reserve particular drive(s) with
47 * h- controller no., b- channel no.,
48 * t- target ID, l- LUN
49 * reverse_scan:Y reverse scan order for PCI controllers
50 * reverse_scan:N scan PCI controllers like BIOS
51 * max_ids:x x - target ID count per channel (1..MAXID)
52 * rescan:Y rescan all channels/IDs
53 * rescan:N use all devices found until now
54 * hdr_channel:x x - number of virtual bus for host drives
55 * shared_access:Y disable driver reserve/release protocol to
56 * access a shared resource from several nodes,
57 * appropriate controller firmware required
58 * shared_access:N enable driver reserve/release protocol
59 * force_dma32:Y use only 32 bit DMA mode
60 * force_dma32:N use 64 bit DMA mode, if supported
61 *
62 * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
63 * max_ids:127,rescan:N,hdr_channel:0,
64 * shared_access:Y,force_dma32:N".
65 * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
66 *
67 * When loading the gdth driver as a module, the same options are available.
68 * You can set the IRQs with "IRQ=...". However, the syntax to specify the
69 * options changes slightly. You must replace all ',' between options
70 * with ' ' and all ':' with '=' and you must use
71 * '1' in place of 'Y' and '0' in place of 'N'.
72 *
 * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
 *          max_ids=127 rescan=0 hdr_channel=0 shared_access=1
 *          force_dma32=0"
76 * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
77 */
78
79 /* The meaning of the Scsi_Pointer members in this driver is as follows:
80 * ptr: Chaining
81 * this_residual: unused
82 * buffer: unused
83 * dma_handle: unused
84 * buffers_residual: unused
85 * Status: unused
86 * Message: unused
87 * have_data_in: unused
88 * sent_command: unused
89 * phase: unused
90 */
91
92 /* statistics */
93 #define GDTH_STATISTICS
94
95 #include <linux/module.h>
96
97 #include <linux/version.h>
98 #include <linux/kernel.h>
99 #include <linux/types.h>
100 #include <linux/pci.h>
101 #include <linux/string.h>
102 #include <linux/ctype.h>
103 #include <linux/ioport.h>
104 #include <linux/delay.h>
105 #include <linux/interrupt.h>
106 #include <linux/in.h>
107 #include <linux/proc_fs.h>
108 #include <linux/time.h>
109 #include <linux/timer.h>
110 #include <linux/dma-mapping.h>
111 #include <linux/list.h>
112 #include <linux/mutex.h>
113 #include <linux/slab.h>
114 #include <linux/reboot.h>
115
116 #include <asm/dma.h>
117 #include <asm/io.h>
118 #include <linux/uaccess.h>
119 #include <linux/spinlock.h>
120 #include <linux/blkdev.h>
121 #include <linux/scatterlist.h>
122
123 #include "scsi.h"
124 #include <scsi/scsi_host.h>
125 #include "gdth.h"
126
127 static DEFINE_MUTEX(gdth_mutex);
128 static void gdth_delay(int milliseconds);
129 static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
130 static irqreturn_t gdth_interrupt(int irq, void *dev_id);
131 static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
132 int gdth_from_wait, int* pIndex);
133 static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
134 struct scsi_cmnd *scp);
135 static int gdth_async_event(gdth_ha_str *ha);
136 static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
137
138 static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
139 static void gdth_next(gdth_ha_str *ha);
140 static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
141 static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
142 static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
143 u16 idx, gdth_evt_data *evt);
144 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
145 static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
146 gdth_evt_str *estr);
147 static void gdth_clear_events(void);
148
149 static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
150 char *buffer, u16 count);
151 static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
152 static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
153 u16 hdrive);
154
155 static void gdth_enable_int(gdth_ha_str *ha);
156 static int gdth_test_busy(gdth_ha_str *ha);
157 static int gdth_get_cmd_index(gdth_ha_str *ha);
158 static void gdth_release_event(gdth_ha_str *ha);
159 static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
160 static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
161 u32 p1, u64 p2,u64 p3);
162 static int gdth_search_drives(gdth_ha_str *ha);
163 static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
164
165 static const char *gdth_ctr_name(gdth_ha_str *ha);
166
167 static int gdth_open(struct inode *inode, struct file *filep);
168 static int gdth_close(struct inode *inode, struct file *filep);
169 static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
170 unsigned long arg);
171
172 static void gdth_flush(gdth_ha_str *ha);
173 static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
174 static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
175 struct gdth_cmndinfo *cmndinfo);
176 static void gdth_scsi_done(struct scsi_cmnd *scp);
177
178 #ifdef DEBUG_GDTH
179 static u8 DebugState = DEBUG_GDTH;
180 #define TRACE(a) {if (DebugState==1) {printk a;}}
181 #define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}}
182 #define TRACE3(a) {if (DebugState!=0) {printk a;}}
183 #else /* !DEBUG */
184 #define TRACE(a)
185 #define TRACE2(a)
186 #define TRACE3(a)
187 #endif
188
189 #ifdef GDTH_STATISTICS
190 static u32 max_rq=0, max_index=0, max_sg=0;
191 static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
192 static struct timer_list gdth_timer;
193 #endif
194
195 #define PTR2USHORT(a) (u16)(unsigned long)(a)
196 #define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
197 #define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t))
198
199 #define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
200
201 static u8 gdth_polling; /* polling if TRUE */
202 static int gdth_ctr_count = 0; /* controller count */
203 static LIST_HEAD(gdth_instances); /* controller list */
204 static u8 gdth_write_through = FALSE; /* write through */
205 static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
206 static int elastidx;
207 static int eoldidx;
208 static int major;
209
210 #define DIN 1 /* IN data direction */
211 #define DOU 2 /* OUT data direction */
212 #define DNO DIN /* no data transfer */
213 #define DUN DIN /* unknown data direction */
214 static u8 gdth_direction_tab[0x100] = {
215 DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
216 DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
217 DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
218 DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
219 DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN,
220 DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
221 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
222 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
223 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
224 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,
225 DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU,
226 DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
227 DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
228 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
229 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
230 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
231 };
232
233 /* LILO and modprobe/insmod parameters */
234 /* disable driver flag */
235 static int disable __initdata = 0;
236 /* reserve flag */
237 static int reserve_mode = 1;
238 /* reserve list */
239 static int reserve_list[MAX_RES_ARGS] =
240 {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
241 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
242 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
243 /* scan order for PCI controllers */
244 static int reverse_scan = 0;
245 /* virtual channel for the host drives */
246 static int hdr_channel = 0;
247 /* max. IDs per channel */
248 static int max_ids = MAXID;
249 /* rescan all IDs */
250 static int rescan = 0;
251 /* shared access */
252 static int shared_access = 1;
253 /* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
254 static int force_dma32 = 0;
255
256 /* parameters for modprobe/insmod */
257 module_param(disable, int, 0);
258 module_param(reserve_mode, int, 0);
259 module_param_array(reserve_list, int, NULL, 0);
260 module_param(reverse_scan, int, 0);
261 module_param(hdr_channel, int, 0);
262 module_param(max_ids, int, 0);
263 module_param(rescan, int, 0);
264 module_param(shared_access, int, 0);
265 module_param(force_dma32, int, 0);
266 MODULE_AUTHOR("Achim Leubner");
267 MODULE_LICENSE("GPL");
268
269 /* ioctl interface */
270 static const struct file_operations gdth_fops = {
271 .unlocked_ioctl = gdth_unlocked_ioctl,
272 .open = gdth_open,
273 .release = gdth_close,
274 .llseek = noop_llseek,
275 };
276
277 #include "gdth_proc.h"
278 #include "gdth_proc.c"
279
280 static gdth_ha_str *gdth_find_ha(int hanum)
281 {
282 gdth_ha_str *ha;
283
284 list_for_each_entry(ha, &gdth_instances, list)
285 if (hanum == ha->hanum)
286 return ha;
287
288 return NULL;
289 }
290
291 static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
292 {
293 struct gdth_cmndinfo *priv = NULL;
294 unsigned long flags;
295 int i;
296
297 spin_lock_irqsave(&ha->smp_lock, flags);
298
299 for (i=0; i<GDTH_MAXCMDS; ++i) {
300 if (ha->cmndinfo[i].index == 0) {
301 priv = &ha->cmndinfo[i];
302 memset(priv, 0, sizeof(*priv));
303 priv->index = i+1;
304 break;
305 }
306 }
307
308 spin_unlock_irqrestore(&ha->smp_lock, flags);
309
310 return priv;
311 }
312
313 static void gdth_put_cmndinfo(struct gdth_cmndinfo *priv)
314 {
315 BUG_ON(!priv);
316 priv->index = 0;
317 }
318
/* Busy-wait helper: a zero argument still delays at least 1 us,
 * anything else delays the given number of milliseconds. */
static void gdth_delay(int milliseconds)
{
    if (milliseconds)
        mdelay(milliseconds);
    else
        udelay(1);
}
327
328 static void gdth_scsi_done(struct scsi_cmnd *scp)
329 {
330 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
331 int internal_command = cmndinfo->internal_command;
332
333 TRACE2(("gdth_scsi_done()\n"));
334
335 gdth_put_cmndinfo(cmndinfo);
336 scp->host_scribble = NULL;
337
338 if (internal_command)
339 complete((struct completion *)scp->request);
340 else
341 scp->scsi_done(scp);
342 }
343
344 int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
345 int timeout, u32 *info)
346 {
347 gdth_ha_str *ha = shost_priv(sdev->host);
348 struct scsi_cmnd *scp;
349 struct gdth_cmndinfo cmndinfo;
350 DECLARE_COMPLETION_ONSTACK(wait);
351 int rval;
352
353 scp = kzalloc(sizeof(*scp), GFP_KERNEL);
354 if (!scp)
355 return -ENOMEM;
356
357 scp->sense_buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
358 if (!scp->sense_buffer) {
359 kfree(scp);
360 return -ENOMEM;
361 }
362
363 scp->device = sdev;
364 memset(&cmndinfo, 0, sizeof(cmndinfo));
365
366 /* use request field to save the ptr. to completion struct. */
367 scp->request = (struct request *)&wait;
368 scp->cmd_len = 12;
369 scp->cmnd = cmnd;
370 cmndinfo.priority = IOCTL_PRI;
371 cmndinfo.internal_cmd_str = gdtcmd;
372 cmndinfo.internal_command = 1;
373
374 TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
375 __gdth_queuecommand(ha, scp, &cmndinfo);
376
377 wait_for_completion(&wait);
378
379 rval = cmndinfo.status;
380 if (info)
381 *info = cmndinfo.info;
382 kfree(scp->sense_buffer);
383 kfree(scp);
384 return rval;
385 }
386
387 int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
388 int timeout, u32 *info)
389 {
390 struct scsi_device *sdev = scsi_get_host_dev(shost);
391 int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);
392
393 scsi_free_host_dev(sdev);
394 return rval;
395 }
396
397 static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
398 {
399 *cyls = size /HEADS/SECS;
400 if (*cyls <= MAXCYLS) {
401 *heads = HEADS;
402 *secs = SECS;
403 } else { /* too high for 64*32 */
404 *cyls = size /MEDHEADS/MEDSECS;
405 if (*cyls <= MAXCYLS) {
406 *heads = MEDHEADS;
407 *secs = MEDSECS;
408 } else { /* too high for 127*63 */
409 *cyls = size /BIGHEADS/BIGSECS;
410 *heads = BIGHEADS;
411 *secs = BIGSECS;
412 }
413 }
414 }
415
416 static bool gdth_search_vortex(u16 device)
417 {
418 if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
419 return true;
420 if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP &&
421 device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP)
422 return true;
423 if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX ||
424 device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2)
425 return true;
426 return false;
427 }
428
429 static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out);
430 static int gdth_pci_init_one(struct pci_dev *pdev,
431 const struct pci_device_id *ent);
432 static void gdth_pci_remove_one(struct pci_dev *pdev);
433 static void gdth_remove_one(gdth_ha_str *ha);
434
435 /* Vortex only makes RAID controllers.
436 * We do not really want to specify all 550 ids here, so wildcard match.
437 */
438 static const struct pci_device_id gdthtable[] = {
439 { PCI_VDEVICE(VORTEX, PCI_ANY_ID) },
440 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) },
441 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) },
442 { } /* terminate list */
443 };
444 MODULE_DEVICE_TABLE(pci, gdthtable);
445
446 static struct pci_driver gdth_pci_driver = {
447 .name = "gdth",
448 .id_table = gdthtable,
449 .probe = gdth_pci_init_one,
450 .remove = gdth_pci_remove_one,
451 };
452
453 static void gdth_pci_remove_one(struct pci_dev *pdev)
454 {
455 gdth_ha_str *ha = pci_get_drvdata(pdev);
456
457 list_del(&ha->list);
458 gdth_remove_one(ha);
459
460 pci_disable_device(pdev);
461 }
462
463 static int gdth_pci_init_one(struct pci_dev *pdev,
464 const struct pci_device_id *ent)
465 {
466 u16 vendor = pdev->vendor;
467 u16 device = pdev->device;
468 unsigned long base0, base1, base2;
469 int rc;
470 gdth_pci_str gdth_pcistr;
471 gdth_ha_str *ha = NULL;
472
473 TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
474 gdth_ctr_count, vendor, device));
475
476 memset(&gdth_pcistr, 0, sizeof(gdth_pcistr));
477
478 if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device))
479 return -ENODEV;
480
481 rc = pci_enable_device(pdev);
482 if (rc)
483 return rc;
484
485 if (gdth_ctr_count >= MAXHA)
486 return -EBUSY;
487
488 /* GDT PCI controller found, resources are already in pdev */
489 gdth_pcistr.pdev = pdev;
490 base0 = pci_resource_flags(pdev, 0);
491 base1 = pci_resource_flags(pdev, 1);
492 base2 = pci_resource_flags(pdev, 2);
493 if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000/B */
494 device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */
495 if (!(base0 & IORESOURCE_MEM))
496 return -ENODEV;
497 gdth_pcistr.dpmem = pci_resource_start(pdev, 0);
498 } else { /* GDT6110, GDT6120, .. */
499 if (!(base0 & IORESOURCE_MEM) ||
500 !(base2 & IORESOURCE_MEM) ||
501 !(base1 & IORESOURCE_IO))
502 return -ENODEV;
503 gdth_pcistr.dpmem = pci_resource_start(pdev, 2);
504 gdth_pcistr.io = pci_resource_start(pdev, 1);
505 }
506 TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
507 gdth_pcistr.pdev->bus->number,
508 PCI_SLOT(gdth_pcistr.pdev->devfn),
509 gdth_pcistr.irq,
510 gdth_pcistr.dpmem));
511
512 rc = gdth_pci_probe_one(&gdth_pcistr, &ha);
513 if (rc)
514 return rc;
515
516 return 0;
517 }
518
/* One-time hardware initialization of a GDT PCI controller.
 *
 * Maps the board's dual-ported memory (DPMEM), verifies the board
 * answers through it (relocating the window into the legacy
 * 0xC8000-0xE8000 range if the configured address is shadowed),
 * resets the interface area, runs the firmware DEINIT and BIOS
 * handshakes, and records the controller family in ha->type
 * (GDT_PCI / GDT_PCINEW / GDT_PCIMPR) plus 64-bit DMA capability.
 *
 * Returns 1 on success, 0 on any failure (legacy TRUE/FALSE
 * convention, not 0/-errno).
 */
static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
                         gdth_ha_str *ha)
{
    register gdt6_dpram_str __iomem *dp6_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    u32 retries;
    u8 prot_ver;
    u16 command;
    int i, found = FALSE;

    TRACE(("gdth_init_pci()\n"));

    /* OEM id selects Intel vs. ICP branding for later reporting */
    if (pdev->vendor == PCI_VENDOR_ID_INTEL)
        ha->oem_id = OEM_ID_INTEL;
    else
        ha->oem_id = OEM_ID_ICP;
    ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
    ha->stype = (u32)pdev->device;
    ha->irq = pdev->irq;
    ha->pdev = pdev;

    if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) {  /* GDT6000/B */
        TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            return 0;
        }
        /* check and reset interface area: write the magic through the
         * mapping and read it back to prove the board is reachable */
        dp6_ptr = ha->brd;
        writel(DPMEM_MAGIC, &dp6_ptr->u);
        if (readl(&dp6_ptr->u) != DPMEM_MAGIC) {
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            /* scan the legacy window for an unshadowed 16K slot and
             * reprogram BAR0 to point there */
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(u16));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                /* anything other than 0xffff means some device answers here */
                if (readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_old() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
                ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6_ptr = ha->brd;
                writel(DPMEM_MAGIC, &dp6_ptr->u);
                if (readl(&dp6_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        /* clear the interface area and verify the clear took effect */
        memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
        if (readl(&dp6_ptr->u) != 0) {
            printk("GDT-PCI: Initialization error (DPMEM write error)\n");
            iounmap(ha->brd);
            return 0;
        }

        /* disable board interrupts, deinit services */
        writeb(0xff, &dp6_ptr->io.irqdel);
        writeb(0x00, &dp6_ptr->io.irqen);
        writeb(0x00, &dp6_ptr->u.ic.S_Status);
        writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);

        /* DEINIT handshake: post 0xff and poll S_Status for the echo */
        writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
        writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
        writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
        writeb(0, &dp6_ptr->u.ic.S_Status);
        writeb(0xff, &dp6_ptr->io.irqdel);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCI;
        ha->ic_all_size = sizeof(dp6_ptr->u);

        /* special command to controller BIOS (index 0xfe handshake) */
        writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
        writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
        writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
        writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
        writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
        writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        writeb(0, &dp6_ptr->u.ic.S_Status);
        writeb(0xff, &dp6_ptr->io.irqdel);

        ha->dma64_support = 0;

    } else if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
        /* PLX-bridged family: doorbells/semaphores live in I/O space */
        ha->plx = (gdt6c_plx_regs *)pcistr->io;
        TRACE2(("init_pci_new() dpmem %lx irq %d\n",
            pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            /* NOTE(review): ha->brd is NULL here, so this iounmap is a
             * no-op at best — confirm it is tolerated on all arches */
            iounmap(ha->brd);
            return 0;
        }
        /* check and reset interface area */
        dp6c_ptr = ha->brd;
        writel(DPMEM_MAGIC, &dp6c_ptr->u);
        if (readl(&dp6c_ptr->u) != DPMEM_MAGIC) {
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            /* scan the legacy window; DPMEM is BAR2 on this family */
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(u16));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_plx() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i);
                ha->brd = ioremap(i, sizeof(gdt6c_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6c_ptr = ha->brd;
                writel(DPMEM_MAGIC, &dp6c_ptr->u);
                if (readl(&dp6c_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u));
        if (readl(&dp6c_ptr->u) != 0) {
            printk("GDT-PCI: Initialization error (DPMEM write error)\n");
            iounmap(ha->brd);
            return 0;
        }

        /* disable board interrupts, deinit services */
        outb(0x00,PTR2USHORT(&ha->plx->control1));
        outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));

        writeb(0x00, &dp6c_ptr->u.ic.S_Status);
        writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);

        writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
        writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);

        /* ring the local doorbell to start the DEINIT handshake */
        outb(1,PTR2USHORT(&ha->plx->ldoor_reg));

        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
        /* NOTE(review): this family clears the plain Status field here,
         * unlike the other branches which clear S_Status — confirm
         * against gdt6c_dpram_str that this asymmetry is intentional */
        writeb(0, &dp6c_ptr->u.ic.Status);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCINEW;
        ha->ic_all_size = sizeof(dp6c_ptr->u);

        /* special command to controller BIOS (index 0xfe handshake) */
        writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
        writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
        writel(0x00, &dp6c_ptr->u.ic.S_Info[2]);
        writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
        writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);

        outb(1,PTR2USHORT(&ha->plx->ldoor_reg));

        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        writeb(0, &dp6c_ptr->u.ic.S_Status);

        ha->dma64_support = 0;

    } else { /* MPR */
        TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            return 0;
        }

        /* manipulate config. space to enable DPMEM, start RP controller */
        pci_read_config_word(pdev, PCI_COMMAND, &command);
        command |= 6;
        pci_write_config_word(pdev, PCI_COMMAND, command);
        gdth_delay(1);

        dp6m_ptr = ha->brd;

        /* Ensure that it is safe to access the non HW portions of DPMEM.
         * Additional check needed for Xscale based RAID controllers */
        while( ((int)readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 )
            gdth_delay(1);

        /* check and reset interface area */
        writel(DPMEM_MAGIC, &dp6m_ptr->u);
        if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(u16));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_mpr() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
                ha->brd = ioremap(i, sizeof(gdt6m_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6m_ptr = ha->brd;
                writel(DPMEM_MAGIC, &dp6m_ptr->u);
                if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u));

        /* disable board interrupts, deinit services
         * (bit 2 in edoor_en_reg masks the event doorbell) */
        writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
                    &dp6m_ptr->i960r.edoor_en_reg);
        writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
        writeb(0x00, &dp6m_ptr->u.ic.S_Status);
        writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);

        /* DEINIT handshake via the i960 local doorbell */
        writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
        writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
        writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
        writeb(0, &dp6m_ptr->u.ic.S_Status);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCIMPR;
        ha->ic_all_size = sizeof(dp6m_ptr->u);

        /* special command to controller BIOS (index 0xfe handshake) */
        writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
        writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
        writel(0x00, &dp6m_ptr->u.ic.S_Info[2]);
        writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
        writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
        writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        writeb(0, &dp6m_ptr->u.ic.S_Status);

        /* read FW version to detect 64-bit DMA support */
        writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx);
        writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        /* firmware version is in the upper half of S_Info[0] */
        prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
        writeb(0, &dp6m_ptr->u.ic.S_Status);
        if (prot_ver < 0x2b)      /* FW < x.43: no 64-bit DMA support */
            ha->dma64_support = 0;
        else
            ha->dma64_support = 1;
    }

    return 1;
}
893
894 /* controller protocol functions */
895
896 static void gdth_enable_int(gdth_ha_str *ha)
897 {
898 unsigned long flags;
899 gdt6_dpram_str __iomem *dp6_ptr;
900 gdt6m_dpram_str __iomem *dp6m_ptr;
901
902 TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
903 spin_lock_irqsave(&ha->smp_lock, flags);
904
905 if (ha->type == GDT_PCI) {
906 dp6_ptr = ha->brd;
907 writeb(1, &dp6_ptr->io.irqdel);
908 writeb(0, &dp6_ptr->u.ic.Cmd_Index);
909 writeb(1, &dp6_ptr->io.irqen);
910 } else if (ha->type == GDT_PCINEW) {
911 outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
912 outb(0x03, PTR2USHORT(&ha->plx->control1));
913 } else if (ha->type == GDT_PCIMPR) {
914 dp6m_ptr = ha->brd;
915 writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
916 writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
917 &dp6m_ptr->i960r.edoor_en_reg);
918 }
919 spin_unlock_irqrestore(&ha->smp_lock, flags);
920 }
921
922 /* return IStatus if interrupt was from this card else 0 */
923 static u8 gdth_get_status(gdth_ha_str *ha)
924 {
925 u8 IStatus = 0;
926
927 TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
928
929 if (ha->type == GDT_PCI)
930 IStatus =
931 readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
932 else if (ha->type == GDT_PCINEW)
933 IStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
934 else if (ha->type == GDT_PCIMPR)
935 IStatus =
936 readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);
937
938 return IStatus;
939 }
940
941 static int gdth_test_busy(gdth_ha_str *ha)
942 {
943 register int gdtsema0 = 0;
944
945 TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));
946
947 if (ha->type == GDT_PCI)
948 gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
949 else if (ha->type == GDT_PCINEW)
950 gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
951 else if (ha->type == GDT_PCIMPR)
952 gdtsema0 =
953 (int)readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
954
955 return (gdtsema0 & 1);
956 }
957
958
959 static int gdth_get_cmd_index(gdth_ha_str *ha)
960 {
961 int i;
962
963 TRACE(("gdth_get_cmd_index() hanum %d\n", ha->hanum));
964
965 for (i=0; i<GDTH_MAXCMDS; ++i) {
966 if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
967 ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
968 ha->cmd_tab[i].service = ha->pccb->Service;
969 ha->pccb->CommandIndex = (u32)i+2;
970 return (i+2);
971 }
972 }
973 return 0;
974 }
975
976
977 static void gdth_set_sema0(gdth_ha_str *ha)
978 {
979 TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));
980
981 if (ha->type == GDT_PCI) {
982 writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
983 } else if (ha->type == GDT_PCINEW) {
984 outb(1, PTR2USHORT(&ha->plx->sema0_reg));
985 } else if (ha->type == GDT_PCIMPR) {
986 writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
987 }
988 }
989
990
/* Copy the command prepared in ha->pccb into the next free slot of the
 * controller's DPMEM command queue and advance the per-burst
 * bookkeeping (cmd_cnt, cmd_offs_dpmem). */
static void gdth_copy_command(gdth_ha_str *ha)
{
    register gdth_cmd_str *cmd_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    gdt6_dpram_str __iomem *dp6_ptr;
    u16 cp_count,dp_offset,cmd_no;

    TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));

    cp_count = ha->cmd_len;             /* bytes to copy */
    dp_offset= ha->cmd_offs_dpmem;      /* current offset into DPMEM */
    cmd_no = ha->cmd_cnt;               /* queue slot for this command */
    cmd_ptr = ha->pccb;

    ++ha->cmd_cnt;

    /* set cpcount dword aligned */
    if (cp_count & 3)
        cp_count += (4 - (cp_count & 3));

    ha->cmd_offs_dpmem += cp_count;

    /* set offset and service, copy command to DPMEM
     * (board-type specific DPMEM layouts, same sequence each) */
    if (ha->type == GDT_PCI) {
        dp6_ptr = ha->brd;
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCINEW) {
        dp6c_ptr = ha->brd;
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCIMPR) {
        dp6m_ptr = ha->brd;
        writew(dp_offset + DPMEM_COMMAND_OFFSET,
               &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
        writew((u16)cmd_ptr->Service,
               &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    }
}
1038
1039
/* Notify the controller that new command(s) have been placed in DPMEM
 * by writing the board-specific "local doorbell"/event register. */
static void gdth_release_event(gdth_ha_str *ha)
{
    TRACE(("gdth_release_event() hanum %d\n", ha->hanum));

#ifdef GDTH_STATISTICS
    {
        u32 i,j;
        /* count occupied command slots and track the high-water mark */
        for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
            if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
                ++i;
        }
        if (max_index < i) {
            max_index = i;
            TRACE3(("GDT: max_index = %d\n",(u16)i));
        }
    }
#endif

    /* flag GDT_INIT commands by setting bit 7 of the service field */
    if (ha->pccb->OpCode == GDT_INIT)
        ha->pccb->Service |= 0x80;

    if (ha->type == GDT_PCI) {
        writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
    } else if (ha->type == GDT_PCINEW) {
        outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
    } else if (ha->type == GDT_PCIMPR) {
        writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg);
    }
}
1069
/* Poll for completion of the command with the given index by invoking
 * the interrupt handler by hand (used in polling/init mode).
 * 'time' is the timeout in gdth_delay(1) iterations; index == 0 means
 * nothing to wait for.  Returns TRUE if the answer arrived in time.
 * NOTE(review): time == 0 would make "--time" wrap to a huge value;
 * visible callers pass INIT_TIMEOUT/POLL_TIMEOUT constants — confirm. */
static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
{
    int answer_found = FALSE;
    int wait_index = 0;

    TRACE(("gdth_wait() hanum %d index %d time %d\n", ha->hanum, index, time));

    if (index == 0)
        return 1;                               /* no wait required */

    do {
        /* wait_index receives the index of a completed command, if any */
        __gdth_interrupt(ha, true, &wait_index);
        if (wait_index == index) {
            answer_found = TRUE;
            break;
        }
        gdth_delay(1);
    } while (--time);

    /* wait until the controller releases the busy semaphore */
    while (gdth_test_busy(ha))
        gdth_delay(0);

    return (answer_found);
}
1094
1095
/* Issue a single firmware command synchronously (driver init, polling
 * mode).  p1..p3 are interpreted per service/opcode (see the branches
 * below).  Retries up to INIT_RETRIES times while the controller
 * answers S_BSY.  Returns 1 if the final status is S_OK, else 0. */
static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
                             u32 p1, u64 p2, u64 p3)
{
    register gdth_cmd_str *cmd_ptr;
    int retries,index;

    TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));

    cmd_ptr = ha->pccb;
    memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));

    /* make command */
    for (retries = INIT_RETRIES;;) {
        cmd_ptr->Service = service;
        cmd_ptr->RequestBuffer = INTERNAL_CMND;
        if (!(index=gdth_get_cmd_index(ha))) {
            TRACE(("GDT: No free command index found\n"));
            return 0;
        }
        gdth_set_sema0(ha);
        cmd_ptr->OpCode = opcode;
        cmd_ptr->BoardNode = LOCALBOARD;
        if (service == CACHESERVICE) {
            if (opcode == GDT_IOCTL) {
                /* p1: subfunction, p2: channel, p3: parameter size;
                 * data is exchanged through the scratch buffer */
                cmd_ptr->u.ioctl.subfunc = p1;
                cmd_ptr->u.ioctl.channel = (u32)p2;
                cmd_ptr->u.ioctl.param_size = (u16)p3;
                cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
            } else {
                /* p1: device number, p2: block number */
                if (ha->cache_feat & GDT_64BIT) {
                    cmd_ptr->u.cache64.DeviceNo = (u16)p1;
                    cmd_ptr->u.cache64.BlockNo = p2;
                } else {
                    cmd_ptr->u.cache.DeviceNo = (u16)p1;
                    cmd_ptr->u.cache.BlockNo = (u32)p2;
                }
            }
        } else if (service == SCSIRAWSERVICE) {
            /* p1: direction, p2: bus, p3: target | (lun << 8) */
            if (ha->raw_feat & GDT_64BIT) {
                cmd_ptr->u.raw64.direction = p1;
                cmd_ptr->u.raw64.bus = (u8)p2;
                cmd_ptr->u.raw64.target = (u8)p3;
                cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
            } else {
                cmd_ptr->u.raw.direction = p1;
                cmd_ptr->u.raw.bus = (u8)p2;
                cmd_ptr->u.raw.target = (u8)p3;
                cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
            }
        } else if (service == SCREENSERVICE) {
            if (opcode == GDT_REALTIME) {
                /* p1..p3 are packed into the screen data area */
                *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
                *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
                *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
            }
        }
        ha->cmd_len = sizeof(gdth_cmd_str);
        ha->cmd_offs_dpmem = 0;
        ha->cmd_cnt = 0;
        gdth_copy_command(ha);
        gdth_release_event(ha);
        gdth_delay(20);
        if (!gdth_wait(ha, index, INIT_TIMEOUT)) {
            printk("GDT: Initialization error (timeout service %d)\n",service);
            return 0;
        }
        if (ha->status != S_BSY || --retries == 0)
            break;
        gdth_delay(1);
    }

    return (ha->status != S_OK ? 0:1);
}
1169
1170
/* search for devices */

/* Initialize all firmware services (screen, cache, raw), detect the
 * SCSI channels, read cache/board configuration, build the physical
 * and logical drive lists, reserve drives if requested, read the OEM
 * string and finally scan all host drives via gdth_analyse_hdrive().
 * Returns 1 on success, 0 on a fatal initialization error. */
static int gdth_search_drives(gdth_ha_str *ha)
{
    u16 cdev_cnt, i;
    int ok;
    u32 bus_no, drv_cnt, drv_no, j;
    gdth_getch_str *chn;
    gdth_drlist_str *drl;
    gdth_iochan_str *ioc;
    gdth_raw_iochan_str *iocr;
    gdth_arcdl_str *alst;
    gdth_alist_str *alst2;
    gdth_oem_str_ioctl *oemstr;

    TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
    ok = 0;

    /* initialize controller services, at first: screen service */
    ha->screen_feat = 0;
    if (!force_dma32) {
        /* try the 64-bit init first; fall back below on S_NOFUNC */
        ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_X_INIT_SCR, 0, 0, 0);
        if (ok)
            ha->screen_feat = GDT_64BIT;
    }
    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
        ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
    if (!ok) {
        printk("GDT-HA %d: Initialization error screen service (code %d)\n",
               ha->hanum, ha->status);
        return 0;
    }
    TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));

    /* unfreeze all IOs */
    gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0);

    /* initialize cache service */
    ha->cache_feat = 0;
    if (!force_dma32) {
        ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INIT_HOST, LINUX_OS,
                               0, 0);
        if (ok)
            ha->cache_feat = GDT_64BIT;
    }
    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
        ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
    if (!ok) {
        printk("GDT-HA %d: Initialization error cache service (code %d)\n",
               ha->hanum, ha->status);
        return 0;
    }
    TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
    cdev_cnt = (u16)ha->info;           /* host drive count from init reply */
    ha->fw_vers = ha->service;

    /* detect number of buses - try new IOCTL */
    iocr = (gdth_raw_iochan_str *)ha->pscratch;
    iocr->hdr.version = 0xffffffff;
    iocr->hdr.list_entries = MAXBUS;
    iocr->hdr.first_chan = 0;
    iocr->hdr.last_chan = MAXBUS-1;
    iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]);
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_RAW_DESC,
                          INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) {
        TRACE2(("IOCHAN_RAW_DESC supported!\n"));
        ha->bus_cnt = iocr->hdr.chan_count;
        for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
            if (iocr->list[bus_no].proc_id < MAXID)
                ha->bus_id[bus_no] = iocr->list[bus_no].proc_id;
            else
                ha->bus_id[bus_no] = 0xff;
        }
    } else {
        /* old method: probe channels one by one until a query fails */
        chn = (gdth_getch_str *)ha->pscratch;
        for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
            chn->channel_no = bus_no;
            if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                   SCSI_CHAN_CNT | L_CTRL_PATTERN,
                                   IO_CHANNEL | INVALID_CHANNEL,
                                   sizeof(gdth_getch_str))) {
                if (bus_no == 0) {
                    printk("GDT-HA %d: Error detecting channel count (0x%x)\n",
                           ha->hanum, ha->status);
                    return 0;
                }
                break;
            }
            if (chn->siop_id < MAXID)
                ha->bus_id[bus_no] = chn->siop_id;
            else
                ha->bus_id[bus_no] = 0xff;
        }
        ha->bus_cnt = (u8)bus_no;
    }
    TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));

    /* read cache configuration */
    if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_INFO,
                           INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
        printk("GDT-HA %d: Initialization error cache service (code %d)\n",
               ha->hanum, ha->status);
        return 0;
    }
    ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar;
    TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n",
            ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
            ha->cpar.write_back,ha->cpar.block_size));

    /* read board info and features */
    ha->more_proc = FALSE;
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_INFO,
                          INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
        memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch,
               sizeof(gdth_binfo_str));
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_FEATURES,
                              INVALID_CHANNEL,sizeof(gdth_bfeat_str))) {
            TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n"));
            ha->bfeat = *(gdth_bfeat_str *)ha->pscratch;
            ha->more_proc = TRUE;
        }
    } else {
        TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n"));
        strcpy(ha->binfo.type_string, gdth_ctr_name(ha));
    }
    TRACE2(("Controller name: %s\n",ha->binfo.type_string));

    /* read more informations */
    if (ha->more_proc) {
        /* physical drives, channel addresses */
        ioc = (gdth_iochan_str *)ha->pscratch;
        ioc->hdr.version = 0xffffffff;
        ioc->hdr.list_entries = MAXBUS;
        ioc->hdr.first_chan = 0;
        ioc->hdr.last_chan = MAXBUS-1;
        ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]);
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_DESC,
                              INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
            for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
                ha->raw[bus_no].address = ioc->list[bus_no].address;
                ha->raw[bus_no].local_no = ioc->list[bus_no].local_no;
            }
        } else {
            /* fallback: assume a default channel addressing */
            for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
                ha->raw[bus_no].address = IO_CHANNEL;
                ha->raw[bus_no].local_no = bus_no;
            }
        }
        for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
            /* per-channel physical drive count and ID list */
            chn = (gdth_getch_str *)ha->pscratch;
            chn->channel_no = ha->raw[bus_no].local_no;
            if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                  SCSI_CHAN_CNT | L_CTRL_PATTERN,
                                  ha->raw[bus_no].address | INVALID_CHANNEL,
                                  sizeof(gdth_getch_str))) {
                ha->raw[bus_no].pdev_cnt = chn->drive_cnt;
                TRACE2(("Channel %d: %d phys. drives\n",
                        bus_no,chn->drive_cnt));
            }
            if (ha->raw[bus_no].pdev_cnt > 0) {
                drl = (gdth_drlist_str *)ha->pscratch;
                drl->sc_no = ha->raw[bus_no].local_no;
                drl->sc_cnt = ha->raw[bus_no].pdev_cnt;
                if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                      SCSI_DR_LIST | L_CTRL_PATTERN,
                                      ha->raw[bus_no].address | INVALID_CHANNEL,
                                      sizeof(gdth_drlist_str))) {
                    for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j)
                        ha->raw[bus_no].id_list[j] = drl->sc_list[j];
                } else {
                    ha->raw[bus_no].pdev_cnt = 0;
                }
            }
        }

        /* logical drives */
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
                              INVALID_CHANNEL,sizeof(u32))) {
            drv_cnt = *(u32 *)ha->pscratch;
            if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
                                  INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
                for (j = 0; j < drv_cnt; ++j) {
                    drv_no = ((u32 *)ha->pscratch)[j];
                    if (drv_no < MAX_LDRIVES) {
                        ha->hdr[drv_no].is_logdrv = TRUE;
                        TRACE2(("Drive %d is log. drive\n",drv_no));
                    }
                }
            }
            /* array drive list: try the newer ARRAY_DRV_LIST2 first */
            alst = (gdth_arcdl_str *)ha->pscratch;
            alst->entries_avail = MAX_LDRIVES;
            alst->first_entry = 0;
            alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]);
            if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                  ARRAY_DRV_LIST2 | LA_CTRL_PATTERN,
                                  INVALID_CHANNEL, sizeof(gdth_arcdl_str) +
                                  (alst->entries_avail-1) * sizeof(gdth_alist_str))) {
                for (j = 0; j < alst->entries_init; ++j) {
                    ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd;
                    ha->hdr[j].is_master = alst->list[j].is_master;
                    ha->hdr[j].is_parity = alst->list[j].is_parity;
                    ha->hdr[j].is_hotfix = alst->list[j].is_hotfix;
                    ha->hdr[j].master_no = alst->list[j].cd_handle;
                }
            } else if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                                         ARRAY_DRV_LIST | LA_CTRL_PATTERN,
                                         0, 35 * sizeof(gdth_alist_str))) {
                for (j = 0; j < 35; ++j) {
                    alst2 = &((gdth_alist_str *)ha->pscratch)[j];
                    ha->hdr[j].is_arraydrv = alst2->is_arrayd;
                    ha->hdr[j].is_master = alst2->is_master;
                    ha->hdr[j].is_parity = alst2->is_parity;
                    ha->hdr[j].is_hotfix = alst2->is_hotfix;
                    ha->hdr[j].master_no = alst2->cd_handle;
                }
            }
        }
    }

    /* initialize raw service */
    ha->raw_feat = 0;
    if (!force_dma32) {
        ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_X_INIT_RAW, 0, 0, 0);
        if (ok)
            ha->raw_feat = GDT_64BIT;
    }
    if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
        ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
    if (!ok) {
        printk("GDT-HA %d: Initialization error raw service (code %d)\n",
               ha->hanum, ha->status);
        return 0;
    }
    TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));

    /* set/get features raw service (scatter/gather) */
    if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_SET_FEAT, SCATTER_GATHER,
                          0, 0)) {
        TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
        if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
            TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
                    ha->info));
            ha->raw_feat |= (u16)ha->info;
        }
    }

    /* set/get features cache service (equal to raw service) */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_SET_FEAT, 0,
                          SCATTER_GATHER,0)) {
        TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
            TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
                    ha->info));
            ha->cache_feat |= (u16)ha->info;
        }
    }

    /* reserve drives for raw service */
    if (reserve_mode != 0) {
        gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE_ALL,
                          reserve_mode == 1 ? 1 : 3, 0, 0);
        TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n",
                ha->status));
    }
    /* reserve_list holds (hanum, bus, id, lun) quadruples */
    for (i = 0; i < MAX_RES_ARGS; i += 4) {
        if (reserve_list[i] == ha->hanum && reserve_list[i+1] < ha->bus_cnt &&
            reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) {
            TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n",
                    reserve_list[i], reserve_list[i+1],
                    reserve_list[i+2], reserve_list[i+3]));
            if (!gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE, 0,
                                   reserve_list[i+1], reserve_list[i+2] |
                                   (reserve_list[i+3] << 8))) {
                printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n",
                       ha->hanum, ha->status);
            }
        }
    }

    /* Determine OEM string using IOCTL */
    oemstr = (gdth_oem_str_ioctl *)ha->pscratch;
    oemstr->params.ctl_version = 0x01;
    oemstr->params.buffer_size = sizeof(oemstr->text);
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
                          CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL,
                          sizeof(gdth_oem_str_ioctl))) {
        TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n"));
        printk("GDT-HA %d: Vendor: %s Name: %s\n",
               ha->hanum, oemstr->text.oem_company_name, ha->binfo.type_string);
        /* Save the Host Drive inquiry data */
        strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,
                sizeof(ha->oem_name));
    } else {
        /* Old method, based on PCI ID */
        TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n"));
        printk("GDT-HA %d: Name: %s\n",
               ha->hanum, ha->binfo.type_string);
        if (ha->oem_id == OEM_ID_INTEL)
            strlcpy(ha->oem_name,"Intel ", sizeof(ha->oem_name));
        else
            strlcpy(ha->oem_name,"ICP ", sizeof(ha->oem_name));
    }

    /* scanning for host drives */
    for (i = 0; i < cdev_cnt; ++i)
        gdth_analyse_hdrive(ha, i);

    TRACE(("gdth_search_drives() OK\n"));
    return 1;
}
1482
/* Query the controller about host drive 'hdrive' and fill in the
 * corresponding ha->hdr[] slot: size, CHS geometry, device type,
 * cluster info and R/W attributes.
 * Returns 1 if the drive is present, 0 otherwise. */
static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
{
    u32 drv_cyls;
    int drv_hds, drv_secs;

    TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
    if (hdrive >= MAX_HDRIVES)
        return 0;

    if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_INFO, hdrive, 0, 0))
        return 0;
    ha->hdr[hdrive].present = TRUE;
    ha->hdr[hdrive].size = ha->info;

    /* evaluate mapping (sectors per head, heads per cylinder) */
    ha->hdr[hdrive].size &= ~SECS32;
    if (ha->info2 == 0) {
        /* no geometry from firmware: derive one from the size */
        gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs);
    } else {
        /* geometry packed by firmware into info2 (heads | secs<<8) */
        drv_hds = ha->info2 & 0xff;
        drv_secs = (ha->info2 >> 8) & 0xff;
        drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
    }
    ha->hdr[hdrive].heads = (u8)drv_hds;
    ha->hdr[hdrive].secs = (u8)drv_secs;
    /* round size */
    ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;

    if (ha->cache_feat & GDT_64BIT) {
        /* 64-bit capable firmware: fetch the full 64-bit size */
        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
            && ha->info2 != 0) {
            ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
        }
    }
    TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
            hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs));

    /* get informations about device */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
        TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
                hdrive,ha->info));
        ha->hdr[hdrive].devtype = (u16)ha->info;
    }

    /* cluster info */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_INFO, hdrive, 0, 0)) {
        TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
                hdrive,ha->info));
        if (!shared_access)
            ha->hdr[hdrive].cluster_type = (u8)ha->info;
    }

    /* R/W attributes */
    if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
        TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
                hdrive,ha->info));
        ha->hdr[hdrive].rw_attribs = (u8)ha->info;
    }

    return 1;
}
1544
1545
/* command queueing/sending functions */

/* Insert a SCSI command into the per-adapter request queue — a singly
 * linked list threaded through scp->SCp.ptr, kept sorted by priority
 * (0 = highest, 0xff = lowest). */
static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
{
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
    register struct scsi_cmnd *pscp;
    register struct scsi_cmnd *nscp;
    unsigned long flags;

    TRACE(("gdth_putq() priority %d\n",priority));
    spin_lock_irqsave(&ha->smp_lock, flags);

    /* internal commands keep the priority set by their issuer */
    if (!cmndinfo->internal_command)
        cmndinfo->priority = priority;

    if (ha->req_first==NULL) {
        ha->req_first = scp;                    /* queue was empty */
        scp->SCp.ptr = NULL;
    } else {                                    /* queue not empty */
        pscp = ha->req_first;
        nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
        /* priority: 0-highest,..,0xff-lowest */
        while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
            pscp = nscp;
            nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
        }
        /* link scp in between pscp and nscp */
        pscp->SCp.ptr = (char *)scp;
        scp->SCp.ptr = (char *)nscp;
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);

#ifdef GDTH_STATISTICS
    /* track maximum queue length in max_rq ('flags' reused as counter) */
    flags = 0;
    for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
        ++flags;
    if (max_rq < flags) {
        max_rq = flags;
        TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
    }
#endif
}
1587
/* Take commands off the request queue and dispatch them to the
 * controller (cache or raw service), handling bus-scan bookkeeping and
 * commands that are answered locally.  In polling mode a single command
 * is dispatched and waited for; otherwise dispatching continues until
 * the queue is empty or the controller/command table is full. */
static void gdth_next(gdth_ha_str *ha)
{
    register struct scsi_cmnd *pscp;
    register struct scsi_cmnd *nscp;
    u8 b, t, l, firsttime;
    u8 this_cmd, next_cmd;
    unsigned long flags = 0;
    int cmd_index;

    TRACE(("gdth_next() hanum %d\n", ha->hanum));
    if (!gdth_polling)
        spin_lock_irqsave(&ha->smp_lock, flags);

    ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
    this_cmd = firsttime = TRUE;
    next_cmd = gdth_polling ? FALSE:TRUE;       /* polling: one cmd at a time */
    cmd_index = 0;

    for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
        struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
        /* keep pscp pointing at the predecessor of nscp */
        if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
            pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
        if (!nscp_cmndinfo->internal_command) {
            b = nscp->device->channel;
            t = nscp->device->id;
            l = nscp->device->lun;
            if (nscp_cmndinfo->priority >= DEFAULT_PRI) {
                /* skip locked channels/host drives for normal priority */
                if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
                    (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock))
                    continue;
            }
        } else
            b = t = l = 0;

        if (firsttime) {
            if (gdth_test_busy(ha)) {           /* controller busy ? */
                TRACE(("gdth_next() controller %d busy !\n", ha->hanum));
                if (!gdth_polling) {
                    spin_unlock_irqrestore(&ha->smp_lock, flags);
                    return;
                }
                while (gdth_test_busy(ha))
                    gdth_delay(1);
            }
            firsttime = FALSE;
        }

        if (!nscp_cmndinfo->internal_command) {
            if (nscp_cmndinfo->phase == -1) {
                nscp_cmndinfo->phase = CACHESERVICE; /* default: cache svc. */
                if (nscp->cmnd[0] == TEST_UNIT_READY) {
                    TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n",
                            b, t, l));
                    /* TEST_UNIT_READY -> set scan mode */
                    if ((ha->scan_mode & 0x0f) == 0) {
                        if (b == 0 && t == 0 && l == 0) {
                            ha->scan_mode |= 1;
                            TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
                        }
                    } else if ((ha->scan_mode & 0x0f) == 1) {
                        if (b == 0 && ((t == 0 && l == 1) ||
                                       (t == 1 && l == 0))) {
                            nscp_cmndinfo->OpCode = GDT_SCAN_START;
                            nscp_cmndinfo->phase = ((ha->scan_mode & 0x10 ? 1:0) << 8)
                                | SCSIRAWSERVICE;
                            ha->scan_mode = 0x12;
                            TRACE2(("Scan mode: 0x%x (SCAN_START)\n",
                                    ha->scan_mode));
                        } else {
                            ha->scan_mode &= 0x10;
                            TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
                        }
                    } else if (ha->scan_mode == 0x12) {
                        if (b == ha->bus_cnt && t == ha->tid_cnt-1) {
                            nscp_cmndinfo->phase = SCSIRAWSERVICE;
                            nscp_cmndinfo->OpCode = GDT_SCAN_END;
                            ha->scan_mode &= 0x10;
                            TRACE2(("Scan mode: 0x%x (SCAN_END)\n",
                                    ha->scan_mode));
                        }
                    }
                }
                if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY &&
                    nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE &&
                    (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) {
                    /* always GDT_CLUST_INFO! */
                    nscp_cmndinfo->OpCode = GDT_CLUST_INFO;
                }
            }
        }

        if (nscp_cmndinfo->OpCode != -1) {
            /* an explicit controller opcode was set above (or earlier) */
            if ((nscp_cmndinfo->phase & 0xff) == CACHESERVICE) {
                if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                    this_cmd = FALSE;
                next_cmd = FALSE;
            } else if ((nscp_cmndinfo->phase & 0xff) == SCSIRAWSERVICE) {
                if (!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
                    this_cmd = FALSE;
                next_cmd = FALSE;
            } else {
                /* unknown service: complete with NOT_READY sense */
                memset((char*)nscp->sense_buffer,0,16);
                nscp->sense_buffer[0] = 0x70;
                nscp->sense_buffer[2] = NOT_READY;
                nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
                if (!nscp_cmndinfo->wait_for_completion)
                    nscp_cmndinfo->wait_for_completion++;
                else
                    gdth_scsi_done(nscp);
            }
        } else if (gdth_cmnd_priv(nscp)->internal_command) {
            if (!(cmd_index=gdth_special_cmd(ha, nscp)))
                this_cmd = FALSE;
            next_cmd = FALSE;
        } else if (b != ha->virt_bus) {
            /* raw-service command to a physical SCSI channel */
            if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW ||
                !(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
                this_cmd = FALSE;
            else
                ha->raw[BUS_L2P(ha,b)].io_cnt[t]++;
        } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) {
            TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n",
                    nscp->cmnd[0], b, t, l));
            nscp->result = DID_BAD_TARGET << 16;
            if (!nscp_cmndinfo->wait_for_completion)
                nscp_cmndinfo->wait_for_completion++;
            else
                gdth_scsi_done(nscp);
        } else {
            switch (nscp->cmnd[0]) {
            case TEST_UNIT_READY:
            case INQUIRY:
            case REQUEST_SENSE:
            case READ_CAPACITY:
            case VERIFY:
            case START_STOP:
            case MODE_SENSE:
            case SERVICE_ACTION_IN_16:
                TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
                       nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
                       nscp->cmnd[4],nscp->cmnd[5]));
                if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) {
                    /* return UNIT_ATTENTION */
                    TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
                            nscp->cmnd[0], t));
                    ha->hdr[t].media_changed = FALSE;
                    memset((char*)nscp->sense_buffer,0,16);
                    nscp->sense_buffer[0] = 0x70;
                    nscp->sense_buffer[2] = UNIT_ATTENTION;
                    nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
                    if (!nscp_cmndinfo->wait_for_completion)
                        nscp_cmndinfo->wait_for_completion++;
                    else
                        gdth_scsi_done(nscp);
                } else if (gdth_internal_cache_cmd(ha, nscp))
                    gdth_scsi_done(nscp);
                break;

            case ALLOW_MEDIUM_REMOVAL:
                TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
                       nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
                       nscp->cmnd[4],nscp->cmnd[5]));
                if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) {
                    TRACE(("Prevent r. nonremov. drive->do nothing\n"));
                    nscp->result = DID_OK << 16;
                    nscp->sense_buffer[0] = 0;
                    if (!nscp_cmndinfo->wait_for_completion)
                        nscp_cmndinfo->wait_for_completion++;
                    else
                        gdth_scsi_done(nscp);
                } else {
                    nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
                    TRACE(("Prevent/allow r. %d rem. drive %d\n",
                           nscp->cmnd[4],nscp->cmnd[3]));
                    if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                        this_cmd = FALSE;
                }
                break;

            case RESERVE:
            case RELEASE:
                TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ?
                        "RESERVE" : "RELEASE"));
                if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                    this_cmd = FALSE;
                break;

            case READ_6:
            case WRITE_6:
            case READ_10:
            case WRITE_10:
            case READ_16:
            case WRITE_16:
                if (ha->hdr[t].media_changed) {
                    /* return UNIT_ATTENTION */
                    TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
                            nscp->cmnd[0], t));
                    ha->hdr[t].media_changed = FALSE;
                    memset((char*)nscp->sense_buffer,0,16);
                    nscp->sense_buffer[0] = 0x70;
                    nscp->sense_buffer[2] = UNIT_ATTENTION;
                    nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
                    if (!nscp_cmndinfo->wait_for_completion)
                        nscp_cmndinfo->wait_for_completion++;
                    else
                        gdth_scsi_done(nscp);
                } else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                    this_cmd = FALSE;
                break;

            default:
                TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0],
                        nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
                        nscp->cmnd[4],nscp->cmnd[5]));
                printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n",
                       ha->hanum, nscp->cmnd[0]);
                nscp->result = DID_ABORT << 16;
                if (!nscp_cmndinfo->wait_for_completion)
                    nscp_cmndinfo->wait_for_completion++;
                else
                    gdth_scsi_done(nscp);
                break;
            }
        }

        if (!this_cmd)
            break;
        /* unlink the dispatched command from the request queue */
        if (nscp == ha->req_first)
            ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
        else
            pscp->SCp.ptr = nscp->SCp.ptr;
        if (!next_cmd)
            break;
    }

    if (ha->cmd_cnt > 0) {
        /* ring the doorbell for everything copied to DPMEM above */
        gdth_release_event(ha);
    }

    if (!gdth_polling)
        spin_unlock_irqrestore(&ha->smp_lock, flags);

    if (gdth_polling && ha->cmd_cnt > 0) {
        if (!gdth_wait(ha, cmd_index, POLL_TIMEOUT))
            printk("GDT-HA %d: Command %d timed out !\n",
                   ha->hanum, cmd_index);
    }
}
1836
1837 /*
1838 * gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's
1839 * buffers, kmap_atomic() as needed.
1840 */
1841 static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
1842 char *buffer, u16 count)
1843 {
1844 u16 cpcount,i, max_sg = scsi_sg_count(scp);
1845 u16 cpsum,cpnow;
1846 struct scatterlist *sl;
1847 char *address;
1848
1849 cpcount = min_t(u16, count, scsi_bufflen(scp));
1850
1851 if (cpcount) {
1852 cpsum=0;
1853 scsi_for_each_sg(scp, sl, max_sg, i) {
1854 unsigned long flags;
1855 cpnow = (u16)sl->length;
1856 TRACE(("copy_internal() now %d sum %d count %d %d\n",
1857 cpnow, cpsum, cpcount, scsi_bufflen(scp)));
1858 if (cpsum+cpnow > cpcount)
1859 cpnow = cpcount - cpsum;
1860 cpsum += cpnow;
1861 if (!sg_page(sl)) {
1862 printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
1863 ha->hanum);
1864 return;
1865 }
1866 local_irq_save(flags);
1867 address = kmap_atomic(sg_page(sl)) + sl->offset;
1868 memcpy(address, buffer, cpnow);
1869 flush_dcache_page(sg_page(sl));
1870 kunmap_atomic(address);
1871 local_irq_restore(flags);
1872 if (cpsum == cpcount)
1873 break;
1874 buffer += cpnow;
1875 }
1876 } else if (count) {
1877 printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
1878 ha->hanum);
1879 WARN_ON(1);
1880 }
1881 }
1882
/* Answer simple SCSI commands (INQUIRY, REQUEST SENSE, MODE SENSE,
 * READ CAPACITY, ...) for host drives locally, without involving the
 * controller.  Returns 1 if the caller should complete the command via
 * gdth_scsi_done(), 0 if completion is deferred (first pass with
 * wait_for_completion not yet set). */
static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
    u8 t;
    gdth_inq_data inq;
    gdth_rdcap_data rdc;
    gdth_sense_data sd;
    gdth_modep_data mpd;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);

    t = scp->device->id;
    TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
           scp->cmnd[0],t));

    scp->result = DID_OK << 16;
    scp->sense_buffer[0] = 0;

    switch (scp->cmnd[0]) {
    case TEST_UNIT_READY:
    case VERIFY:
    case START_STOP:
        /* nothing to do: host drives are always ready */
        TRACE2(("Test/Verify/Start hdrive %d\n",t));
        break;

    case INQUIRY:
        TRACE2(("Inquiry hdrive %d devtype %d\n",
                t,ha->hdr[t].devtype));
        inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
        /* you can here set all disks to removable, if you want to do
           a flush using the ALLOW_MEDIUM_REMOVAL command */
        inq.modif_rmb = 0x00;
        if ((ha->hdr[t].devtype & 1) ||
            (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
            inq.modif_rmb = 0x80;
        inq.version = 2;
        inq.resp_aenc = 2;
        inq.add_length= 32;
        strcpy(inq.vendor,ha->oem_name);
        snprintf(inq.product, sizeof(inq.product), "Host Drive #%02d",t);
        strcpy(inq.revision," ");
        gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
        break;

    case REQUEST_SENSE:
        /* always report "no sense pending" */
        TRACE2(("Request sense hdrive %d\n",t));
        sd.errorcode = 0x70;
        sd.segno = 0x00;
        sd.key = NO_SENSE;
        sd.info = 0;
        sd.add_length= 0;
        gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
        break;

    case MODE_SENSE:
        TRACE2(("Mode sense hdrive %d\n",t));
        memset((char*)&mpd,0,sizeof(gdth_modep_data));
        mpd.hd.data_length = sizeof(gdth_modep_data);
        mpd.hd.dev_par = (ha->hdr[t].devtype&2) ? 0x80:0;  /* write protect */
        mpd.hd.bd_length = sizeof(mpd.bd);
        mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
        mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
        mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
        gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
        break;

    case READ_CAPACITY:
        TRACE2(("Read capacity hdrive %d\n",t));
        /* drives > 2 TB need READ CAPACITY (16); saturate here */
        if (ha->hdr[t].size > (u64)0xffffffff)
            rdc.last_block_no = 0xffffffff;
        else
            rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
        rdc.block_length = cpu_to_be32(SECTOR_SIZE);
        gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
        break;

    case SERVICE_ACTION_IN_16:
        if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
            (ha->cache_feat & GDT_64BIT)) {
            gdth_rdcap16_data rdc16;

            TRACE2(("Read capacity (16) hdrive %d\n",t));
            rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
            rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
            gdth_copy_internal_data(ha, scp, (char*)&rdc16,
                                    sizeof(gdth_rdcap16_data));
        } else {
            scp->result = DID_ABORT << 16;
        }
        break;

    default:
        TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
        break;
    }

    if (!cmndinfo->wait_for_completion)
        cmndinfo->wait_for_completion++;
    else
        return 1;

    return 0;
}
1984
/* Translate a SCSI command addressed to a host drive into a firmware
 * CACHESERVICE request and copy it into the controller command area.
 *
 * ha:     per-controller state; ha->pccb is the command build buffer
 * scp:    SCSI command to translate (or an internal command when
 *         cmndinfo->OpCode != -1)
 * hdrive: target host-drive number
 *
 * Returns the allocated firmware command index (> 0) on success, or 0 when
 * no free command slot exists or the DPMEM command area would overflow.
 */
static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
                                u16 hdrive)
{
    register gdth_cmd_str *cmdp;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
    u32 cnt, blockcnt;
    u64 no, blockno;
    int i, cmd_index, read_write, sgcnt, mode64;

    cmdp = ha->pccb;
    TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
           scp->cmnd[0],scp->cmd_len,hdrive));

    /* use the 64-bit command layout only if the cache service advertises it */
    mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
    /* test for READ_16, WRITE_16 if !mode64 ? ---
       not required, should not occur due to error return on
       READ_CAPACITY_16 */

    cmdp->Service = CACHESERVICE;
    cmdp->RequestBuffer = scp;
    /* search free command index */
    if (!(cmd_index=gdth_get_cmd_index(ha))) {
        TRACE(("GDT: No free command index found\n"));
        return 0;
    }
    /* if it's the first command, set command semaphore */
    if (ha->cmd_cnt == 0)
        gdth_set_sema0(ha);

    /* fill command: map the SCSI opcode onto the firmware opcode.
       read_write stays 0 for non-data commands, 1 for writes, 2 for reads. */
    read_write = 0;
    if (cmndinfo->OpCode != -1)
        cmdp->OpCode = cmndinfo->OpCode;   /* special cache cmd. */
    else if (scp->cmnd[0] == RESERVE)
        cmdp->OpCode = GDT_RESERVE_DRV;
    else if (scp->cmnd[0] == RELEASE)
        cmdp->OpCode = GDT_RELEASE_DRV;
    else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
        if (scp->cmnd[4] & 1)                   /* prevent ? */
            cmdp->OpCode = GDT_MOUNT;
        else if (scp->cmnd[3] & 1)              /* removable drive ? */
            cmdp->OpCode = GDT_UNMOUNT;
        else
            cmdp->OpCode = GDT_FLUSH;
    } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 ||
               scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16
    ) {
        read_write = 1;
        /* write-through if forced globally (module option) or if the drive
           requests it and the firmware supports GDT_WR_THROUGH */
        if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) &&
                                   (ha->cache_feat & GDT_WR_THROUGH)))
            cmdp->OpCode = GDT_WRITE_THR;
        else
            cmdp->OpCode = GDT_WRITE;
    } else {
        read_write = 2;
        cmdp->OpCode = GDT_READ;
    }

    cmdp->BoardNode = LOCALBOARD;
    if (mode64) {
        cmdp->u.cache64.DeviceNo = hdrive;
        cmdp->u.cache64.BlockNo = 1;
        cmdp->u.cache64.sg_canz = 0;
    } else {
        cmdp->u.cache.DeviceNo = hdrive;
        cmdp->u.cache.BlockNo = 1;
        cmdp->u.cache.sg_canz = 0;
    }

    if (read_write) {
        /* extract big-endian LBA and block count from the CDB; the field
           offsets and widths depend on the CDB length (16/10/6 bytes) */
        if (scp->cmd_len == 16) {
            memcpy(&no, &scp->cmnd[2], sizeof(u64));
            blockno = be64_to_cpu(no);
            memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
            blockcnt = be32_to_cpu(cnt);
        } else if (scp->cmd_len == 10) {
            /* NOTE(review): only the first 4 bytes of the u64 'no' are
               written here before the be32_to_cpu() truncation — this
               assumes little-endian placement of the low word; verify on
               big-endian targets */
            memcpy(&no, &scp->cmnd[2], sizeof(u32));
            blockno = be32_to_cpu(no);
            memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
            blockcnt = be16_to_cpu(cnt);
        } else {
            /* 6-byte CDB: 21-bit LBA, count byte where 0 means 256 blocks */
            memcpy(&no, &scp->cmnd[0], sizeof(u32));
            blockno = be32_to_cpu(no) & 0x001fffffUL;
            blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
        }
        if (mode64) {
            cmdp->u.cache64.BlockNo = blockno;
            cmdp->u.cache64.BlockCnt = blockcnt;
        } else {
            cmdp->u.cache.BlockNo = (u32)blockno;
            cmdp->u.cache.BlockCnt = blockcnt;
        }

        if (scsi_bufflen(scp)) {
            cmndinfo->dma_dir = (read_write == 1 ?
                DMA_TO_DEVICE : DMA_FROM_DEVICE);
            /* NOTE(review): the dma_map_sg() return value is used directly
               as the SG count with no failure (0) check — confirm intended */
            sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
                               scsi_sg_count(scp), cmndinfo->dma_dir);
            if (mode64) {
                struct scatterlist *sl;

                /* DestAddr of all-ones selects scatter/gather mode */
                cmdp->u.cache64.DestAddr= (u64)-1;
                cmdp->u.cache64.sg_canz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
                    cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            } else {
                struct scatterlist *sl;

                cmdp->u.cache.DestAddr= 0xffffffff;
                cmdp->u.cache.sg_canz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
                    cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            }

#ifdef GDTH_STATISTICS
            /* track the largest SG list seen, for driver statistics */
            if (max_sg < (u32)sgcnt) {
                max_sg = (u32)sgcnt;
                TRACE3(("GDT: max_sg = %d\n",max_sg));
            }
#endif

        }
    }
    /* evaluate command size, check space: the command length is the fixed
       header up to the SG list plus the actually used SG entries */
    if (mode64) {
        TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
               cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz,
               cmdp->u.cache64.sg_lst[0].sg_ptr,
               cmdp->u.cache64.sg_lst[0].sg_len));
        TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
               cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
        ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
            (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
    } else {
        TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
               cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
               cmdp->u.cache.sg_lst[0].sg_ptr,
               cmdp->u.cache.sg_lst[0].sg_len));
        TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
               cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
        ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
            (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
    }
    /* pad the command length to a 4-byte boundary */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        /* would this command overrun the DPMEM command window? if so,
           release the slot just allocated and fail */
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(ha);
    return cmd_index;
}
2148
/* Build a SCSIRAWSERVICE (raw SCSI passthrough) firmware command for @scp
 * on bus @b and copy it into the controller command area.
 *
 * Two paths exist: a "special" raw command (cmndinfo->OpCode != -1) which
 * carries no data/sense buffers, and the normal passthrough path which maps
 * the sense buffer and the data scatter/gather list for DMA.
 *
 * Returns the firmware command index (> 0) on success, 0 when no free
 * command slot exists or the DPMEM command area would overflow.
 */
static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
{
    register gdth_cmd_str *cmdp;
    u16 i;
    dma_addr_t sense_paddr;
    int cmd_index, sgcnt, mode64;
    u8 t,l;
    struct gdth_cmndinfo *cmndinfo;

    t = scp->device->id;
    l = scp->device->lun;
    cmdp = ha->pccb;
    TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
           scp->cmnd[0],b,t,l));

    /* use the 64-bit command layout only if the raw service advertises it */
    mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;

    cmdp->Service = SCSIRAWSERVICE;
    cmdp->RequestBuffer = scp;
    /* search free command index */
    if (!(cmd_index=gdth_get_cmd_index(ha))) {
        TRACE(("GDT: No free command index found\n"));
        return 0;
    }
    /* if it's the first command, set command semaphore */
    if (ha->cmd_cnt == 0)
        gdth_set_sema0(ha);

    cmndinfo = gdth_cmnd_priv(scp);
    /* fill command */
    if (cmndinfo->OpCode != -1) {
        cmdp->OpCode = cmndinfo->OpCode; /* special raw cmd. */
        cmdp->BoardNode = LOCALBOARD;
        if (mode64) {
            /* the transfer direction was stashed in the upper byte of
               cmndinfo->phase by the submitter */
            cmdp->u.raw64.direction = (cmndinfo->phase >> 8);
            TRACE2(("special raw cmd 0x%x param 0x%x\n",
                    cmdp->OpCode, cmdp->u.raw64.direction));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst);
        } else {
            cmdp->u.raw.direction = (cmndinfo->phase >> 8);
            TRACE2(("special raw cmd 0x%x param 0x%x\n",
                    cmdp->OpCode, cmdp->u.raw.direction));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst);
        }

    } else {
        /* normal passthrough: map 16 bytes of sense buffer so the firmware
           can DMA sense data directly on CHECK CONDITION.
           NOTE(review): the dma_map_single() result is not checked with
           dma_mapping_error() — confirm intended */
        sense_paddr = dma_map_single(&ha->pdev->dev, scp->sense_buffer, 16,
                                     DMA_FROM_DEVICE);

        cmndinfo->sense_paddr  = sense_paddr;
        cmdp->OpCode           = GDT_WRITE;             /* always */
        cmdp->BoardNode        = LOCALBOARD;
        if (mode64) {
            cmdp->u.raw64.reserved   = 0;
            cmdp->u.raw64.mdisc_time = 0;
            cmdp->u.raw64.mcon_time  = 0;
            cmdp->u.raw64.clen       = scp->cmd_len;
            cmdp->u.raw64.target     = t;
            cmdp->u.raw64.lun        = l;
            cmdp->u.raw64.bus        = b;
            cmdp->u.raw64.priority   = 0;
            cmdp->u.raw64.sdlen      = scsi_bufflen(scp);
            cmdp->u.raw64.sense_len  = 16;
            cmdp->u.raw64.sense_data = sense_paddr;
            /* direction from the per-opcode lookup table */
            cmdp->u.raw64.direction  =
                gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
            memcpy(cmdp->u.raw64.cmd,scp->cmnd,16);
            cmdp->u.raw64.sg_ranz    = 0;
        } else {
            cmdp->u.raw.reserved   = 0;
            cmdp->u.raw.mdisc_time = 0;
            cmdp->u.raw.mcon_time  = 0;
            cmdp->u.raw.clen       = scp->cmd_len;
            cmdp->u.raw.target     = t;
            cmdp->u.raw.lun        = l;
            cmdp->u.raw.bus        = b;
            cmdp->u.raw.priority   = 0;
            cmdp->u.raw.link_p     = 0;
            cmdp->u.raw.sdlen      = scsi_bufflen(scp);
            cmdp->u.raw.sense_len  = 16;
            cmdp->u.raw.sense_data = sense_paddr;
            cmdp->u.raw.direction  =
                gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
            /* 32-bit layout only has room for a 12-byte CDB */
            memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
            cmdp->u.raw.sg_ranz    = 0;
        }

        if (scsi_bufflen(scp)) {
            cmndinfo->dma_dir = DMA_BIDIRECTIONAL;
            sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
                               scsi_sg_count(scp), cmndinfo->dma_dir);
            if (mode64) {
                struct scatterlist *sl;

                /* sdata of all-ones selects scatter/gather mode */
                cmdp->u.raw64.sdata = (u64)-1;
                cmdp->u.raw64.sg_ranz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
                    cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            } else {
                struct scatterlist *sl;

                cmdp->u.raw.sdata = 0xffffffff;
                cmdp->u.raw.sg_ranz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
                    cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            }

#ifdef GDTH_STATISTICS
            /* track the largest SG list seen, for driver statistics */
            if (max_sg < sgcnt) {
                max_sg = sgcnt;
                TRACE3(("GDT: max_sg = %d\n",sgcnt));
            }
#endif

        }
        if (mode64) {
            TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                   cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
                   cmdp->u.raw64.sg_lst[0].sg_ptr,
                   cmdp->u.raw64.sg_lst[0].sg_len));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
                (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
        } else {
            TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                   cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
                   cmdp->u.raw.sg_lst[0].sg_ptr,
                   cmdp->u.raw.sg_lst[0].sg_len));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
                (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
        }
    }
    /* check space: pad the command length to a 4-byte boundary first */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        /* would this command overrun the DPMEM command window? if so,
           release the slot just allocated and fail */
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(ha);
    return cmd_index;
}
2305
/* Submit a pre-built internal command (saved earlier in
 * cmndinfo->internal_cmd_str) to the controller.
 *
 * The saved gdth_cmd_str is copied wholesale into the build buffer, the
 * request pointer is re-attached, and the command length is derived from
 * the command's service/opcode (IOCTL, cache or raw, 32- or 64-bit layout).
 *
 * Returns the firmware command index (> 0) on success, 0 when no free
 * command slot exists or the DPMEM command area would overflow.
 */
static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
{
    register gdth_cmd_str *cmdp;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
    int cmd_index;

    cmdp= ha->pccb;
    TRACE2(("gdth_special_cmd(): "));

    /* restore the complete firmware command saved by the submitter */
    *cmdp = *cmndinfo->internal_cmd_str;
    cmdp->RequestBuffer = scp;

    /* search free command index */
    if (!(cmd_index=gdth_get_cmd_index(ha))) {
        TRACE(("GDT: No free command index found\n"));
        return 0;
    }

    /* if it's the first command, set command semaphore */
    if (ha->cmd_cnt == 0)
       gdth_set_sema0(ha);

    /* evaluate command size, check space: the length depends on which
       union member (ioctl / cache / raw) the command actually uses */
    if (cmdp->OpCode == GDT_IOCTL) {
        TRACE2(("IOCTL\n"));
        ha->cmd_len =
            GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
    } else if (cmdp->Service == CACHESERVICE) {
        TRACE2(("cache command %d\n",cmdp->OpCode));
        if (ha->cache_feat & GDT_64BIT)
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
        else
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
    } else if (cmdp->Service == SCSIRAWSERVICE) {
        TRACE2(("raw command %d\n",cmdp->OpCode));
        if (ha->raw_feat & GDT_64BIT)
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
        else
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
    }

    /* pad the command length to a 4-byte boundary */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        /* would this command overrun the DPMEM command window? if so,
           release the slot just allocated and fail */
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(ha);
    return cmd_index;
}
2367
2368
2369 /* Controller event handling functions */
2370 static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
2371 u16 idx, gdth_evt_data *evt)
2372 {
2373 gdth_evt_str *e;
2374
2375 /* no GDTH_LOCK_HA() ! */
2376 TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
2377 if (source == 0) /* no source -> no event */
2378 return NULL;
2379
2380 if (ebuffer[elastidx].event_source == source &&
2381 ebuffer[elastidx].event_idx == idx &&
2382 ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
2383 !memcmp((char *)&ebuffer[elastidx].event_data.eu,
2384 (char *)&evt->eu, evt->size)) ||
2385 (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
2386 !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
2387 (char *)&evt->event_string)))) {
2388 e = &ebuffer[elastidx];
2389 e->last_stamp = (u32)ktime_get_real_seconds();
2390 ++e->same_count;
2391 } else {
2392 if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
2393 ++elastidx;
2394 if (elastidx == MAX_EVENTS)
2395 elastidx = 0;
2396 if (elastidx == eoldidx) { /* reached mark ? */
2397 ++eoldidx;
2398 if (eoldidx == MAX_EVENTS)
2399 eoldidx = 0;
2400 }
2401 }
2402 e = &ebuffer[elastidx];
2403 e->event_source = source;
2404 e->event_idx = idx;
2405 e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds();
2406 e->same_count = 1;
2407 e->event_data = *evt;
2408 e->application = 0;
2409 }
2410 return e;
2411 }
2412
2413 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
2414 {
2415 gdth_evt_str *e;
2416 int eindex;
2417 unsigned long flags;
2418
2419 TRACE2(("gdth_read_event() handle %d\n", handle));
2420 spin_lock_irqsave(&ha->smp_lock, flags);
2421 if (handle == -1)
2422 eindex = eoldidx;
2423 else
2424 eindex = handle;
2425 estr->event_source = 0;
2426
2427 if (eindex < 0 || eindex >= MAX_EVENTS) {
2428 spin_unlock_irqrestore(&ha->smp_lock, flags);
2429 return eindex;
2430 }
2431 e = &ebuffer[eindex];
2432 if (e->event_source != 0) {
2433 if (eindex != elastidx) {
2434 if (++eindex == MAX_EVENTS)
2435 eindex = 0;
2436 } else {
2437 eindex = -1;
2438 }
2439 memcpy(estr, e, sizeof(gdth_evt_str));
2440 }
2441 spin_unlock_irqrestore(&ha->smp_lock, flags);
2442 return eindex;
2443 }
2444
2445 static void gdth_readapp_event(gdth_ha_str *ha,
2446 u8 application, gdth_evt_str *estr)
2447 {
2448 gdth_evt_str *e;
2449 int eindex;
2450 unsigned long flags;
2451 u8 found = FALSE;
2452
2453 TRACE2(("gdth_readapp_event() app. %d\n", application));
2454 spin_lock_irqsave(&ha->smp_lock, flags);
2455 eindex = eoldidx;
2456 for (;;) {
2457 e = &ebuffer[eindex];
2458 if (e->event_source == 0)
2459 break;
2460 if ((e->application & application) == 0) {
2461 e->application |= application;
2462 found = TRUE;
2463 break;
2464 }
2465 if (eindex == elastidx)
2466 break;
2467 if (++eindex == MAX_EVENTS)
2468 eindex = 0;
2469 }
2470 if (found)
2471 memcpy(estr, e, sizeof(gdth_evt_str));
2472 else
2473 estr->event_source = 0;
2474 spin_unlock_irqrestore(&ha->smp_lock, flags);
2475 }
2476
2477 static void gdth_clear_events(void)
2478 {
2479 TRACE(("gdth_clear_events()"));
2480
2481 eoldidx = elastidx = 0;
2482 ebuffer[0].event_source = 0;
2483 }
2484
2485
2486 /* SCSI interface functions */
2487
/* Core interrupt / polling handler.
 *
 * Reads the completion status registers of the board (layout depends on the
 * controller type), acknowledges the interrupt, and dispatches:
 *   - ASYNCINDEX: asynchronous controller event,
 *   - SPEZINDEX:  service unknown / not initialized (logged as driver event),
 *   - otherwise:  completion of the command at cmd_tab[IStatus-2].
 *
 * gdth_from_wait/pIndex are used by gdth_wait() when polling: pIndex
 * receives the completed command index.  The register read order and the
 * acknowledge sequence are hardware-mandated; do not reorder.
 */
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
                                 int gdth_from_wait, int* pIndex)
{
    gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
    gdt6_dpram_str __iomem *dp6_ptr;
    struct scsi_cmnd *scp;
    int rval, i;
    u8 IStatus;
    u16 Service;
    unsigned long flags = 0;

    TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));

    /* if polling and not from gdth_wait() -> return */
    if (gdth_polling) {
        if (!gdth_from_wait) {
            return IRQ_HANDLED;
        }
    }

    if (!gdth_polling)
        spin_lock_irqsave(&ha->smp_lock, flags);

    /* search controller */
    IStatus = gdth_get_status(ha);
    if (IStatus == 0) {
        /* spurious interrupt */
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }

#ifdef GDTH_STATISTICS
    ++act_ints;
#endif

    if (ha->type == GDT_PCI) {
        /* plain PCI board: status/info live in dual-ported RAM */
        dp6_ptr = ha->brd;
        if (IStatus & 0x80) {                       /* error flag */
            IStatus &= ~0x80;
            ha->status = readw(&dp6_ptr->u.ic.Status);
            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
        } else                                      /* no error */
            ha->status = S_OK;
        ha->info = readl(&dp6_ptr->u.ic.Info[0]);
        ha->service = readw(&dp6_ptr->u.ic.Service);
        ha->info2 = readl(&dp6_ptr->u.ic.Info[1]);

        writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */
        writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */
        writeb(0, &dp6_ptr->io.Sema1);     /* reset status semaphore */
    } else if (ha->type == GDT_PCINEW) {
        /* new PCI board: registers accessed via PLX I/O ports */
        if (IStatus & 0x80) {                       /* error flag */
            IStatus &= ~0x80;
            ha->status = inw(PTR2USHORT(&ha->plx->status));
            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
        } else
            ha->status = S_OK;
        ha->info = inl(PTR2USHORT(&ha->plx->info[0]));
        ha->service = inw(PTR2USHORT(&ha->plx->service));
        ha->info2 = inl(PTR2USHORT(&ha->plx->info[1]));

        /* acknowledge interrupt and release the status semaphore */
        outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
        outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
    } else if (ha->type == GDT_PCIMPR) {
        /* PCI MPR board: registers in the i960 message RAM */
        dp6m_ptr = ha->brd;
        if (IStatus & 0x80) {                       /* error flag */
            IStatus &= ~0x80;
            ha->status = readw(&dp6m_ptr->i960r.status);
            TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
        } else                                      /* no error */
            ha->status = S_OK;

        ha->info = readl(&dp6m_ptr->i960r.info[0]);
        ha->service = readw(&dp6m_ptr->i960r.service);
        ha->info2 = readl(&dp6m_ptr->i960r.info[1]);

        /* event string: newer firmware (>= 0x1a) delivers async events as
           a NUL-terminated text string in board memory */
        if (IStatus == ASYNCINDEX) {
            if (ha->service != SCREENSERVICE &&
                (ha->fw_vers & 0xff) >= 0x1a) {
                ha->dvr.severity = readb
                    (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity);
                for (i = 0; i < 256; ++i) {
                    ha->dvr.event_string[i] = readb
                        (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]);
                    if (ha->dvr.event_string[i] == 0)
                        break;
                }
            }
        }
        /* acknowledge interrupt and release the status semaphore */
        writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
        writeb(0, &dp6m_ptr->i960r.sema1_reg);
    } else {
        TRACE2(("gdth_interrupt() unknown controller type\n"));
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }

    TRACE(("gdth_interrupt() index %d stat %d info %d\n",
           IStatus,ha->status,ha->info));

    /* report the completed command index back to gdth_wait() */
    if (gdth_from_wait) {
        *pIndex = (int)IStatus;
    }

    if (IStatus == ASYNCINDEX) {
        TRACE2(("gdth_interrupt() async. event\n"));
        gdth_async_event(ha);
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        gdth_next(ha);
        return IRQ_HANDLED;
    }

    if (IStatus == SPEZINDEX) {
        TRACE2(("Service unknown or not initialized !\n"));
        ha->dvr.size = sizeof(ha->dvr.eu.driver);
        ha->dvr.eu.driver.ionode = ha->hanum;
        gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr);
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }
    /* firmware indices start at 2; claim the slot and release it */
    scp     = ha->cmd_tab[IStatus-2].cmnd;
    Service = ha->cmd_tab[IStatus-2].service;
    ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND;
    if (scp == UNUSED_CMND) {
        TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
        /* completion for a slot we did not issue -> log a driver event */
        ha->dvr.size = sizeof(ha->dvr.eu.driver);
        ha->dvr.eu.driver.ionode = ha->hanum;
        ha->dvr.eu.driver.index = IStatus;
        gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr);
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }
    if (scp == INTERNAL_CMND) {
        TRACE(("gdth_interrupt() answer to internal command\n"));
        if (!gdth_polling)
            spin_unlock_irqrestore(&ha->smp_lock, flags);
        return IRQ_HANDLED;
    }

    TRACE(("gdth_interrupt() sync. status\n"));
    rval = gdth_sync_event(ha,Service,IStatus,scp);
    if (!gdth_polling)
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    if (rval == 2) {
        /* retry requested -> requeue with the command's priority */
        gdth_putq(ha, scp, gdth_cmnd_priv(scp)->priority);
    } else if (rval == 1) {
        /* command finished -> complete it towards the SCSI midlayer */
        gdth_scsi_done(scp);
    }

    gdth_next(ha);
    return IRQ_HANDLED;
}
2646
2647 static irqreturn_t gdth_interrupt(int irq, void *dev_id)
2648 {
2649 gdth_ha_str *ha = dev_id;
2650
2651 return __gdth_interrupt(ha, false, NULL);
2652 }
2653
/* Handle the synchronous completion of the command at firmware slot @index.
 *
 * SCREENSERVICE completions print console messages from the firmware and
 * may immediately issue a follow-up READ/WRITE screen command.  Cache/raw
 * completions unmap DMA buffers, update cluster state and translate the
 * firmware status into a SCSI result for @scp.
 *
 * Return values (interpreted by __gdth_interrupt()):
 *   0 - nothing more to do (internal command, or caller keeps waiting)
 *   1 - scp is finished; complete it to the SCSI midlayer
 *   2 - retry; requeue scp
 */
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
                           struct scsi_cmnd *scp)
{
    gdth_msg_str *msg;
    gdth_cmd_str *cmdp;
    u8 b, t;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);

    cmdp = ha->pccb;
    TRACE(("gdth_sync_event() serv %d status %d\n",
           service,ha->status));

    if (service == SCREENSERVICE) {
        msg = ha->pmsg;
        TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n",
               msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
        /* clamp to the message buffer, then print unless the firmware is
           asking a question with more data still to come */
        if (msg->msg_len > MSGLEN+1)
            msg->msg_len = MSGLEN+1;
        if (msg->msg_len)
            if (!(msg->msg_answer && msg->msg_ext)) {
                msg->msg_text[msg->msg_len] = '\0';
                printk("%s",msg->msg_text);
            }

        if (msg->msg_ext && !msg->msg_answer) {
            /* more message text pending: issue a screen READ to fetch it */
            while (gdth_test_busy(ha))
                gdth_delay(0);
            cmdp->Service = SCREENSERVICE;
            cmdp->RequestBuffer = SCREEN_CMND;
            gdth_get_cmd_index(ha);
            gdth_set_sema0(ha);
            cmdp->OpCode = GDT_READ;
            cmdp->BoardNode = LOCALBOARD;
            cmdp->u.screen.reserved = 0;
            cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
            cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
            ha->cmd_offs_dpmem = 0;
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
                + sizeof(u64);
            ha->cmd_cnt = 0;
            gdth_copy_command(ha);
            gdth_release_event(ha);
            return 0;
        }

        if (msg->msg_answer && msg->msg_alen) {
            /* default answers (getchar() not possible) */
            if (msg->msg_alen == 1) {
                msg->msg_alen = 0;
                msg->msg_len = 1;
                msg->msg_text[0] = 0;
            } else {
                msg->msg_alen -= 2;
                msg->msg_len = 2;
                msg->msg_text[0] = 1;
                msg->msg_text[1] = 0;
            }
            msg->msg_ext = 0;
            msg->msg_answer = 0;
            /* send the canned answer back with a screen WRITE */
            while (gdth_test_busy(ha))
                gdth_delay(0);
            cmdp->Service = SCREENSERVICE;
            cmdp->RequestBuffer = SCREEN_CMND;
            gdth_get_cmd_index(ha);
            gdth_set_sema0(ha);
            cmdp->OpCode = GDT_WRITE;
            cmdp->BoardNode = LOCALBOARD;
            cmdp->u.screen.reserved = 0;
            cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
            cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
            ha->cmd_offs_dpmem = 0;
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
                + sizeof(u64);
            ha->cmd_cnt = 0;
            gdth_copy_command(ha);
            gdth_release_event(ha);
            return 0;
        }
        printk("\n");

    } else {
        b = scp->device->channel;
        t = scp->device->id;
        /* normal (non-internal) raw command finished: drop the per-target
           outstanding-I/O count taken at submission */
        if (cmndinfo->OpCode == -1 && b != ha->virt_bus) {
            ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
        }
        /* cache or raw service */
        if (ha->status == S_BSY) {
            TRACE2(("Controller busy -> retry !\n"));
            if (cmndinfo->OpCode == GDT_MOUNT)
                cmndinfo->OpCode = GDT_CLUST_INFO;
            /* retry */
            return 2;
        }
        if (scsi_bufflen(scp))
            dma_unmap_sg(&ha->pdev->dev, scsi_sglist(scp), scsi_sg_count(scp),
                         cmndinfo->dma_dir);

        /* NOTE(review): sense_paddr was mapped with dma_map_single() in
           gdth_fill_raw_cmd() but is unmapped with dma_unmap_page() here —
           functionally equivalent on current kernels, but the map/unmap
           APIs are mismatched; confirm */
        if (cmndinfo->sense_paddr)
            dma_unmap_page(&ha->pdev->dev, cmndinfo->sense_paddr, 16,
                           DMA_FROM_DEVICE);

        if (ha->status == S_OK) {
            cmndinfo->status = S_OK;
            cmndinfo->info = ha->info;
            if (cmndinfo->OpCode != -1) {
                TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
                        cmndinfo->OpCode));
                /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
                if (cmndinfo->OpCode == GDT_CLUST_INFO) {
                    ha->hdr[t].cluster_type = (u8)ha->info;
                    if (!(ha->hdr[t].cluster_type &
                        CLUSTER_MOUNTED)) {
                        /* NOT MOUNTED -> MOUNT */
                        cmndinfo->OpCode = GDT_MOUNT;
                        if (ha->hdr[t].cluster_type &
                            CLUSTER_RESERVED) {
                            /* cluster drive RESERVED (on the other node) */
                            cmndinfo->phase = -2;      /* reservation conflict */
                        }
                    } else {
                        cmndinfo->OpCode = -1;
                    }
                } else {
                    if (cmndinfo->OpCode == GDT_MOUNT) {
                        ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
                        ha->hdr[t].media_changed = TRUE;
                    } else if (cmndinfo->OpCode == GDT_UNMOUNT) {
                        ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
                        ha->hdr[t].media_changed = TRUE;
                    }
                    cmndinfo->OpCode = -1;
                }
                /* retry */
                cmndinfo->priority = HIGH_PRI;
                return 2;
            } else {
                /* RESERVE/RELEASE ? */
                if (scp->cmnd[0] == RESERVE) {
                    ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
                } else if (scp->cmnd[0] == RELEASE) {
                    ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
                }
                scp->result = DID_OK << 16;
                scp->sense_buffer[0] = 0;
            }
        } else {
            /* firmware reported an error status */
            cmndinfo->status = ha->status;
            cmndinfo->info = ha->info;

            if (cmndinfo->OpCode != -1) {
                TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n",
                        cmndinfo->OpCode, ha->status));
                if (cmndinfo->OpCode == GDT_SCAN_START ||
                    cmndinfo->OpCode == GDT_SCAN_END) {
                    cmndinfo->OpCode = -1;
                    /* retry */
                    cmndinfo->priority = HIGH_PRI;
                    return 2;
                }
                /* fail the command with a synthesized NOT READY sense */
                memset((char*)scp->sense_buffer,0,16);
                scp->sense_buffer[0] = 0x70;
                scp->sense_buffer[2] = NOT_READY;
                scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
            } else if (service == CACHESERVICE) {
                if (ha->status == S_CACHE_UNKNOWN &&
                    (ha->hdr[t].cluster_type &
                     CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) {
                    /* bus reset -> force GDT_CLUST_INFO */
                    ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
                }
                memset((char*)scp->sense_buffer,0,16);
                if (ha->status == (u16)S_CACHE_RESERV) {
                    scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
                } else {
                    scp->sense_buffer[0] = 0x70;
                    scp->sense_buffer[2] = NOT_READY;
                    scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
                }
                if (!cmndinfo->internal_command) {
                    /* log the error as a synchronous controller event */
                    ha->dvr.size = sizeof(ha->dvr.eu.sync);
                    ha->dvr.eu.sync.ionode = ha->hanum;
                    ha->dvr.eu.sync.service = service;
                    ha->dvr.eu.sync.status = ha->status;
                    ha->dvr.eu.sync.info = ha->info;
                    ha->dvr.eu.sync.hostdrive = t;
                    if (ha->status >= 0x8000)
                        gdth_store_event(ha, ES_SYNC, 0, &ha->dvr);
                    else
                        gdth_store_event(ha, ES_SYNC, service, &ha->dvr);
                }
            } else {
                /* sense buffer filled from controller firmware (DMA) */
                if (ha->status != S_RAW_SCSI || ha->info >= 0x100) {
                    scp->result = DID_BAD_TARGET << 16;
                } else {
                    scp->result = (DID_OK << 16) | ha->info;
                }
            }
        }
        /* first completion arms the flag, second one finishes the command */
        if (!cmndinfo->wait_for_completion)
            cmndinfo->wait_for_completion++;
        else
            return 1;
    }

    return 0;
}
2862
2863 static char *async_cache_tab[] = {
2864 /* 0*/ "\011\000\002\002\002\004\002\006\004"
2865 "GDT HA %u, service %u, async. status %u/%lu unknown",
2866 /* 1*/ "\011\000\002\002\002\004\002\006\004"
2867 "GDT HA %u, service %u, async. status %u/%lu unknown",
2868 /* 2*/ "\005\000\002\006\004"
2869 "GDT HA %u, Host Drive %lu not ready",
2870 /* 3*/ "\005\000\002\006\004"
2871 "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
2872 /* 4*/ "\005\000\002\006\004"
2873 "GDT HA %u, mirror update on Host Drive %lu failed",
2874 /* 5*/ "\005\000\002\006\004"
2875 "GDT HA %u, Mirror Drive %lu failed",
2876 /* 6*/ "\005\000\002\006\004"
2877 "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
2878 /* 7*/ "\005\000\002\006\004"
2879 "GDT HA %u, Host Drive %lu write protected",
2880 /* 8*/ "\005\000\002\006\004"
2881 "GDT HA %u, media changed in Host Drive %lu",
2882 /* 9*/ "\005\000\002\006\004"
2883 "GDT HA %u, Host Drive %lu is offline",
2884 /*10*/ "\005\000\002\006\004"
2885 "GDT HA %u, media change of Mirror Drive %lu",
2886 /*11*/ "\005\000\002\006\004"
2887 "GDT HA %u, Mirror Drive %lu is write protected",
2888 /*12*/ "\005\000\002\006\004"
2889 "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
2890 /*13*/ "\007\000\002\006\002\010\002"
2891 "GDT HA %u, Array Drive %u: Cache Drive %u failed",
2892 /*14*/ "\005\000\002\006\002"
2893 "GDT HA %u, Array Drive %u: FAIL state entered",
2894 /*15*/ "\005\000\002\006\002"
2895 "GDT HA %u, Array Drive %u: error",
2896 /*16*/ "\007\000\002\006\002\010\002"
2897 "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
2898 /*17*/ "\005\000\002\006\002"
2899 "GDT HA %u, Array Drive %u: parity build failed",
2900 /*18*/ "\005\000\002\006\002"
2901 "GDT HA %u, Array Drive %u: drive rebuild failed",
2902 /*19*/ "\005\000\002\010\002"
2903 "GDT HA %u, Test of Hot Fix %u failed",
2904 /*20*/ "\005\000\002\006\002"
2905 "GDT HA %u, Array Drive %u: drive build finished successfully",
2906 /*21*/ "\005\000\002\006\002"
2907 "GDT HA %u, Array Drive %u: drive rebuild finished successfully",
2908 /*22*/ "\007\000\002\006\002\010\002"
2909 "GDT HA %u, Array Drive %u: Hot Fix %u activated",
2910 /*23*/ "\005\000\002\006\002"
2911 "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
2912 /*24*/ "\005\000\002\010\002"
2913 "GDT HA %u, mirror update on Cache Drive %u completed",
2914 /*25*/ "\005\000\002\010\002"
2915 "GDT HA %u, mirror update on Cache Drive %lu failed",
2916 /*26*/ "\005\000\002\006\002"
2917 "GDT HA %u, Array Drive %u: drive rebuild started",
2918 /*27*/ "\005\000\002\012\001"
2919 "GDT HA %u, Fault bus %u: SHELF OK detected",
2920 /*28*/ "\005\000\002\012\001"
2921 "GDT HA %u, Fault bus %u: SHELF not OK detected",
2922 /*29*/ "\007\000\002\012\001\013\001"
2923 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
2924 /*30*/ "\007\000\002\012\001\013\001"
2925 "GDT HA %u, Fault bus %u, ID %u: new disk detected",
2926 /*31*/ "\007\000\002\012\001\013\001"
2927 "GDT HA %u, Fault bus %u, ID %u: old disk detected",
2928 /*32*/ "\007\000\002\012\001\013\001"
2929 "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid",
2930 /*33*/ "\007\000\002\012\001\013\001"
2931 "GDT HA %u, Fault bus %u, ID %u: invalid device detected",
2932 /*34*/ "\011\000\002\012\001\013\001\006\004"
2933 "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
2934 /*35*/ "\007\000\002\012\001\013\001"
2935 "GDT HA %u, Fault bus %u, ID %u: disk write protected",
2936 /*36*/ "\007\000\002\012\001\013\001"
2937 "GDT HA %u, Fault bus %u, ID %u: disk not available",
2938 /*37*/ "\007\000\002\012\001\006\004"
2939 "GDT HA %u, Fault bus %u: swap detected (%lu)",
2940 /*38*/ "\007\000\002\012\001\013\001"
2941 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
2942 /*39*/ "\007\000\002\012\001\013\001"
2943 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
2944 /*40*/ "\007\000\002\012\001\013\001"
2945 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
2946 /*41*/ "\007\000\002\012\001\013\001"
2947 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
2948 /*42*/ "\005\000\002\006\002"
2949 "GDT HA %u, Array Drive %u: drive build started",
2950 /*43*/ "\003\000\002"
2951 "GDT HA %u, DRAM parity error detected",
2952 /*44*/ "\005\000\002\006\002"
2953 "GDT HA %u, Mirror Drive %u: update started",
2954 /*45*/ "\007\000\002\006\002\010\002"
2955 "GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
2956 /*46*/ "\005\000\002\006\002"
2957 "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
2958 /*47*/ "\005\000\002\006\002"
2959 "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
2960 /*48*/ "\005\000\002\006\002"
2961 "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
2962 /*49*/ "\005\000\002\006\002"
2963 "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
2964 /*50*/ "\007\000\002\012\001\013\001"
2965 "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
2966 /*51*/ "\005\000\002\006\002"
2967 "GDT HA %u, Array Drive %u: expand started",
2968 /*52*/ "\005\000\002\006\002"
2969 "GDT HA %u, Array Drive %u: expand finished successfully",
2970 /*53*/ "\005\000\002\006\002"
2971 "GDT HA %u, Array Drive %u: expand failed",
2972 /*54*/ "\003\000\002"
2973 "GDT HA %u, CPU temperature critical",
2974 /*55*/ "\003\000\002"
2975 "GDT HA %u, CPU temperature OK",
2976 /*56*/ "\005\000\002\006\004"
2977 "GDT HA %u, Host drive %lu created",
2978 /*57*/ "\005\000\002\006\002"
2979 "GDT HA %u, Array Drive %u: expand restarted",
2980 /*58*/ "\005\000\002\006\002"
2981 "GDT HA %u, Array Drive %u: expand stopped",
2982 /*59*/ "\005\000\002\010\002"
2983 "GDT HA %u, Mirror Drive %u: drive build quited",
2984 /*60*/ "\005\000\002\006\002"
2985 "GDT HA %u, Array Drive %u: parity build quited",
2986 /*61*/ "\005\000\002\006\002"
2987 "GDT HA %u, Array Drive %u: drive rebuild quited",
2988 /*62*/ "\005\000\002\006\002"
2989 "GDT HA %u, Array Drive %u: parity verify started",
2990 /*63*/ "\005\000\002\006\002"
2991 "GDT HA %u, Array Drive %u: parity verify done",
2992 /*64*/ "\005\000\002\006\002"
2993 "GDT HA %u, Array Drive %u: parity verify failed",
2994 /*65*/ "\005\000\002\006\002"
2995 "GDT HA %u, Array Drive %u: parity error detected",
2996 /*66*/ "\005\000\002\006\002"
2997 "GDT HA %u, Array Drive %u: parity verify quited",
2998 /*67*/ "\005\000\002\006\002"
2999 "GDT HA %u, Host Drive %u reserved",
3000 /*68*/ "\005\000\002\006\002"
3001 "GDT HA %u, Host Drive %u mounted and released",
3002 /*69*/ "\005\000\002\006\002"
3003 "GDT HA %u, Host Drive %u released",
3004 /*70*/ "\003\000\002"
3005 "GDT HA %u, DRAM error detected and corrected with ECC",
3006 /*71*/ "\003\000\002"
3007 "GDT HA %u, Uncorrectable DRAM error detected with ECC",
3008 /*72*/ "\011\000\002\012\001\013\001\014\001"
3009 "GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block",
3010 /*73*/ "\005\000\002\006\002"
3011 "GDT HA %u, Host drive %u resetted locally",
3012 /*74*/ "\005\000\002\006\002"
3013 "GDT HA %u, Host drive %u resetted remotely",
3014 /*75*/ "\003\000\002"
3015 "GDT HA %u, async. status 75 unknown",
3016 };
3017
3018
/*
 * Handle an asynchronous event reported by the controller firmware.
 *
 * Screen service + MSG_REQUEST means the firmware has a console message
 * pending: a GDT_READ command is built in the shared command buffer and
 * fired so the message can be fetched later by the IRQ path.  For every
 * other service the event is packed into ha->dvr, stored in the driver
 * event buffer and logged.  Always returns 1.
 */
static int gdth_async_event(gdth_ha_str *ha)
{
    gdth_cmd_str *cmdp;
    int cmd_index;

    cmdp= ha->pccb;
    TRACE2(("gdth_async_event() ha %d serv %d\n",
            ha->hanum, ha->service));

    if (ha->service == SCREENSERVICE) {
        if (ha->status == MSG_REQUEST) {
            /* wait until the controller accepts a new command */
            while (gdth_test_busy(ha))
                gdth_delay(0);
            cmdp->Service = SCREENSERVICE;
            cmdp->RequestBuffer = SCREEN_CMND;
            cmd_index = gdth_get_cmd_index(ha);
            gdth_set_sema0(ha);
            cmdp->OpCode = GDT_READ;
            cmdp->BoardNode = LOCALBOARD;
            cmdp->u.screen.reserved = 0;
            cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE;
            cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
            ha->cmd_offs_dpmem = 0;
            /* transfer only up to and including the message address */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
                + sizeof(u64);
            ha->cmd_cnt = 0;
            gdth_copy_command(ha);
            printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
                   (u16)((ha->brd_phys>>3)&0x1f));
            gdth_release_event(ha);
        }

    } else {
        if (ha->type == GDT_PCIMPR &&
            (ha->fw_vers & 0xff) >= 0x1a) {
            /* newer MPR firmware supplies severity/event_string itself */
            ha->dvr.size = 0;
            ha->dvr.eu.async.ionode = ha->hanum;
            ha->dvr.eu.async.status = ha->status;
            /* severity and event_string already set! */
        } else {
            /* older firmware: build the raw async event record here */
            ha->dvr.size = sizeof(ha->dvr.eu.async);
            ha->dvr.eu.async.ionode = ha->hanum;
            ha->dvr.eu.async.service = ha->service;
            ha->dvr.eu.async.status = ha->status;
            ha->dvr.eu.async.info = ha->info;
            *(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
        }
        gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
        gdth_log_event( &ha->dvr, NULL );

        /* new host drive from expand? */
        if (ha->service == CACHESERVICE && ha->status == 56) {
            TRACE2(("gdth_async_event(): new host drive %d created\n",
                (u16)ha->info));
            /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
        }
    }
    return 1;
}
3078
/*
 * Format an asynchronous event into a human-readable line.
 *
 * If 'buffer' is NULL the text is emitted via printk(), otherwise it is
 * sprintf()ed into 'buffer' (caller must provide enough space).
 *
 * Cache-service events are formatted via async_cache_tab: each entry
 * begins with a descriptor whose first byte is its own length and whose
 * following byte pairs are (offset, size) of parameters to extract from
 * dvr->eu.stream; the printf format string starts at f[f[0]].  The
 * extracted values are packed into 'stack' (a gdth_stackframe), which is
 * passed by value so its slots line up with the format conversions.
 */
static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
{
    gdth_stackframe stack;
    char *f = NULL;
    int i,j;

    TRACE2(("gdth_log_event()\n"));
    if (dvr->size == 0) {
        /* firmware already delivered a fully formatted event string */
        if (buffer == NULL) {
            printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string);
        } else {
            sprintf(buffer,"Adapter %d: %s\n",
                dvr->eu.async.ionode,dvr->event_string);
        }
    } else if (dvr->eu.async.service == CACHESERVICE &&
        INDEX_OK(dvr->eu.async.status, async_cache_tab)) {
        TRACE2(("GDT: Async. event cache service, event no.: %d\n",
            dvr->eu.async.status));

        f = async_cache_tab[dvr->eu.async.status];

        /* i: parameter to push, j: stack element to fill */
        for (j=0,i=1; i < f[0]; i+=2) {
            switch (f[i+1]) {
              case 4:
                stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
                break;
              case 2:
                stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
                break;
              case 1:
                stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
                break;
              default:
                break;
            }
        }

        if (buffer == NULL) {
            /* NOTE(review): non-literal format string; acceptable only
               because f always comes from the trusted driver-internal
               async_cache_tab, never from user/firmware data */
            printk(&f[(int)f[0]],stack);
            printk("\n");
        } else {
            sprintf(buffer,&f[(int)f[0]],stack);
        }

    } else {
        /* no table entry for this service/status combination */
        if (buffer == NULL) {
            printk("GDT HA %u, Unknown async. event service %d event no. %d\n",
                dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
        } else {
            sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d",
                dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
        }
    }
}
3134
#ifdef GDTH_STATISTICS
/* set while the statistics timer is armed; prevents double add_timer() */
static u8 gdth_timer_running;

/*
 * Periodic statistics timer (re-armed every 30 s): for the first
 * registered adapter, count commands currently held by the firmware
 * (act_stats) and requests still queued inside the driver (act_rq),
 * trace the interrupt/IO counters and reset them.
 */
static void gdth_timeout(struct timer_list *unused)
{
    u32 i;
    struct scsi_cmnd *nscp;
    gdth_ha_str *ha;
    unsigned long flags;

    if(unlikely(list_empty(&gdth_instances))) {
        /* no adapters left: let the timer die */
        gdth_timer_running = 0;
        return;
    }

    ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
    spin_lock_irqsave(&ha->smp_lock, flags);

    /* commands currently occupying a firmware slot */
    for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
        if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
            ++act_stats;

    /* requests still waiting in the driver queue (linked via SCp.ptr) */
    for (act_rq=0,
         nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
        ++act_rq;

    TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
            act_ints, act_ios, act_stats, act_rq));
    act_ints = act_ios = 0;

    gdth_timer.expires = jiffies + 30 * HZ;
    add_timer(&gdth_timer);
    spin_unlock_irqrestore(&ha->smp_lock, flags);
}

/* arm the statistics timer exactly once; first expiry after 1 s */
static void gdth_timer_init(void)
{
    if (gdth_timer_running)
        return;
    gdth_timer_running = 1;
    TRACE2(("gdth_detect(): Initializing timer !\n"));
    gdth_timer.expires = jiffies + HZ;
    add_timer(&gdth_timer);
}
#else
/* statistics disabled: timer setup is a no-op */
static inline void gdth_timer_init(void)
{
}
#endif
3184
/*
 * Parse the "name:value,..." option part of the command line.
 * 'str' points at the text following any leading IRQ numbers; 'ints'
 * holds those numbers (ints[0] = count) and is only used for tracing.
 * A value may be y/Y (1), n/N (0) or any number simple_strtoul()
 * accepts.  reserve_list: takes up to MAX_RES_ARGS comma-separated
 * numbers and consumes the rest of its option in-place.
 */
static void __init internal_setup(char *str,int *ints)
{
    int i;
    char *cur_str, *argv;

    TRACE2(("internal_setup() str %s ints[0] %d\n",
            str ? str:"NULL", ints ? ints[0]:0));

    /* analyse string */
    argv = str;
    while (argv && (cur_str = strchr(argv, ':'))) {
        int val = 0, c = *++cur_str;

        if (c == 'n' || c == 'N')
            val = 0;
        else if (c == 'y' || c == 'Y')
            val = 1;
        else
            val = (int)simple_strtoul(cur_str, NULL, 0);

        if (!strncmp(argv, "disable:", 8))
            disable = val;
        else if (!strncmp(argv, "reserve_mode:", 13))
            reserve_mode = val;
        else if (!strncmp(argv, "reverse_scan:", 13))
            reverse_scan = val;
        else if (!strncmp(argv, "hdr_channel:", 12))
            hdr_channel = val;
        else if (!strncmp(argv, "max_ids:", 8))
            max_ids = val;
        else if (!strncmp(argv, "rescan:", 7))
            rescan = val;
        else if (!strncmp(argv, "shared_access:", 14))
            shared_access = val;
        else if (!strncmp(argv, "reserve_list:", 13)) {
            /* collect the comma separated reservation arguments */
            reserve_list[0] = val;
            for (i = 1; i < MAX_RES_ARGS; i++) {
                cur_str = strchr(cur_str, ',');
                if (!cur_str)
                    break;
                if (!isdigit((int)*++cur_str)) {
                    --cur_str;
                    break;
                }
                reserve_list[i] =
                    (int)simple_strtoul(cur_str, NULL, 0);
            }
            if (!cur_str)
                break;
            argv = ++cur_str;
            continue;
        }

        /* advance to the next option */
        if ((argv = strchr(argv, ',')))
            ++argv;
    }
}
3242
3243 int __init option_setup(char *str)
3244 {
3245 int ints[MAXHA];
3246 char *cur = str;
3247 int i = 1;
3248
3249 TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
3250
3251 while (cur && isdigit(*cur) && i < MAXHA) {
3252 ints[i++] = simple_strtoul(cur, NULL, 0);
3253 if ((cur = strchr(cur, ',')) != NULL) cur++;
3254 }
3255
3256 ints[0] = i - 1;
3257 internal_setup(cur, ints);
3258 return 1;
3259 }
3260
3261 static const char *gdth_ctr_name(gdth_ha_str *ha)
3262 {
3263 TRACE2(("gdth_ctr_name()\n"));
3264
3265 if (ha->type == GDT_PCI) {
3266 switch (ha->pdev->device) {
3267 case PCI_DEVICE_ID_VORTEX_GDT60x0:
3268 return("GDT6000/6020/6050");
3269 case PCI_DEVICE_ID_VORTEX_GDT6000B:
3270 return("GDT6000B/6010");
3271 }
3272 }
3273 /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */
3274
3275 return("");
3276 }
3277
3278 static const char *gdth_info(struct Scsi_Host *shp)
3279 {
3280 gdth_ha_str *ha = shost_priv(shp);
3281
3282 TRACE2(("gdth_info()\n"));
3283 return ((const char *)ha->binfo.type_string);
3284 }
3285
3286 static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
3287 {
3288 gdth_ha_str *ha = shost_priv(scp->device->host);
3289 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
3290 u8 b, t;
3291 unsigned long flags;
3292 enum blk_eh_timer_return retval = BLK_EH_DONE;
3293
3294 TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
3295 b = scp->device->channel;
3296 t = scp->device->id;
3297
3298 /*
3299 * We don't really honor the command timeout, but we try to
3300 * honor 6 times of the actual command timeout! So reset the
3301 * timer if this is less than 6th timeout on this command!
3302 */
3303 if (++cmndinfo->timeout_count < 6)
3304 retval = BLK_EH_RESET_TIMER;
3305
3306 /* Reset the timeout if it is locked IO */
3307 spin_lock_irqsave(&ha->smp_lock, flags);
3308 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
3309 (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
3310 TRACE2(("%s(): locked IO, reset timeout\n", __func__));
3311 retval = BLK_EH_RESET_TIMER;
3312 }
3313 spin_unlock_irqrestore(&ha->smp_lock, flags);
3314
3315 return retval;
3316 }
3317
3318
/*
 * SCSI error-handler bus reset.
 *
 * First drops every command queued on the affected channel from the
 * firmware command table.  Then, for the virtual (host-drive) bus, it
 * clears the cluster reservation of every present host drive; for a
 * raw SCSI channel it zeroes the per-target I/O counters and issues a
 * GDT_RESET_BUS.  Always reports SUCCESS to the midlayer.
 */
static int gdth_eh_bus_reset(struct scsi_cmnd *scp)
{
    gdth_ha_str *ha = shost_priv(scp->device->host);
    int i;
    unsigned long flags;
    struct scsi_cmnd *cmnd;
    u8 b;

    TRACE2(("gdth_eh_bus_reset()\n"));

    b = scp->device->channel;

    /* clear command tab */
    spin_lock_irqsave(&ha->smp_lock, flags);
    for (i = 0; i < GDTH_MAXCMDS; ++i) {
        cmnd = ha->cmd_tab[i].cmnd;
        if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b)
            ha->cmd_tab[i].cmnd = UNUSED_CMND;
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);

    if (b == ha->virt_bus) {
        /* host drives */
        for (i = 0; i < MAX_HDRIVES; ++i) {
            if (ha->hdr[i].present) {
                spin_lock_irqsave(&ha->smp_lock, flags);
                /* run the internal command in polled mode */
                gdth_polling = TRUE;
                while (gdth_test_busy(ha))
                    gdth_delay(0);
                if (gdth_internal_cmd(ha, CACHESERVICE,
                                      GDT_CLUST_RESET, i, 0, 0))
                    ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED;
                gdth_polling = FALSE;
                spin_unlock_irqrestore(&ha->smp_lock, flags);
            }
        }
    } else {
        /* raw devices */
        spin_lock_irqsave(&ha->smp_lock, flags);
        for (i = 0; i < MAXID; ++i)
            ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0;
        gdth_polling = TRUE;
        while (gdth_test_busy(ha))
            gdth_delay(0);
        gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESET_BUS,
                          BUS_L2P(ha,b), 0, 0);
        gdth_polling = FALSE;
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    }
    return SUCCESS;
}
3370
3371 static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
3372 {
3373 u8 b, t;
3374 gdth_ha_str *ha = shost_priv(sdev->host);
3375 struct scsi_device *sd;
3376 unsigned capacity;
3377
3378 sd = sdev;
3379 capacity = cap;
3380 b = sd->channel;
3381 t = sd->id;
3382 TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", ha->hanum, b, t));
3383
3384 if (b != ha->virt_bus || ha->hdr[t].heads == 0) {
3385 /* raw device or host drive without mapping information */
3386 TRACE2(("Evaluate mapping\n"));
3387 gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]);
3388 } else {
3389 ip[0] = ha->hdr[t].heads;
3390 ip[1] = ha->hdr[t].secs;
3391 ip[2] = capacity / ip[0] / ip[1];
3392 }
3393
3394 TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
3395 ip[0],ip[1],ip[2]));
3396 return 0;
3397 }
3398
3399
3400 static int gdth_queuecommand_lck(struct scsi_cmnd *scp,
3401 void (*done)(struct scsi_cmnd *))
3402 {
3403 gdth_ha_str *ha = shost_priv(scp->device->host);
3404 struct gdth_cmndinfo *cmndinfo;
3405
3406 TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0]));
3407
3408 cmndinfo = gdth_get_cmndinfo(ha);
3409 BUG_ON(!cmndinfo);
3410
3411 scp->scsi_done = done;
3412 cmndinfo->timeout_count = 0;
3413 cmndinfo->priority = DEFAULT_PRI;
3414
3415 return __gdth_queuecommand(ha, scp, cmndinfo);
3416 }
3417
/* generate the host-lock wrapper gdth_queuecommand() around
 * gdth_queuecommand_lck() */
static DEF_SCSI_QCMD(gdth_queuecommand)
3419
3420 static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
3421 struct gdth_cmndinfo *cmndinfo)
3422 {
3423 scp->host_scribble = (unsigned char *)cmndinfo;
3424 cmndinfo->wait_for_completion = 1;
3425 cmndinfo->phase = -1;
3426 cmndinfo->OpCode = -1;
3427
3428 #ifdef GDTH_STATISTICS
3429 ++act_ios;
3430 #endif
3431
3432 gdth_putq(ha, scp, cmndinfo->priority);
3433 gdth_next(ha);
3434 return 0;
3435 }
3436
3437
3438 static int gdth_open(struct inode *inode, struct file *filep)
3439 {
3440 gdth_ha_str *ha;
3441
3442 mutex_lock(&gdth_mutex);
3443 list_for_each_entry(ha, &gdth_instances, list) {
3444 if (!ha->sdev)
3445 ha->sdev = scsi_get_host_dev(ha->shost);
3446 }
3447 mutex_unlock(&gdth_mutex);
3448
3449 TRACE(("gdth_open()\n"));
3450 return 0;
3451 }
3452
/* character-device release: nothing to tear down */
static int gdth_close(struct inode *inode, struct file *filep)
{
    TRACE(("gdth_close()\n"));
    return 0;
}
3458
/*
 * GDTIOCTL_EVENT handler: store, read or clear driver events.
 * evt.erase selects the operation:
 *   0xff - store the event passed in from user space
 *   0xfe - clear the whole event buffer
 *   0x00 - read the event identified by evt.handle
 *   else - read-application event (gdth_readapp_event)
 * The possibly updated structure is copied back to user space.
 */
static int ioc_event(void __user *arg)
{
    gdth_ioctl_event evt;
    gdth_ha_str *ha;
    unsigned long flags;

    if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
        return -EFAULT;
    ha = gdth_find_ha(evt.ionode);
    if (!ha)
        return -EFAULT;

    if (evt.erase == 0xff) {
        /* payload size depends on which union member is in use */
        if (evt.event.event_source == ES_TEST)
            evt.event.event_data.size=sizeof(evt.event.event_data.eu.test);
        else if (evt.event.event_source == ES_DRIVER)
            evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver);
        else if (evt.event.event_source == ES_SYNC)
            evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync);
        else
            evt.event.event_data.size=sizeof(evt.event.event_data.eu.async);
        spin_lock_irqsave(&ha->smp_lock, flags);
        gdth_store_event(ha, evt.event.event_source, evt.event.event_idx,
                         &evt.event.event_data);
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    } else if (evt.erase == 0xfe) {
        gdth_clear_events();
    } else if (evt.erase == 0) {
        evt.handle = gdth_read_event(ha, evt.handle, &evt.event);
    } else {
        gdth_readapp_event(ha, evt.erase, &evt.event);
    }
    if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event)))
        return -EFAULT;
    return 0;
}
3495
/*
 * GDTIOCTL_LOCKDRV handler: lock or unlock a list of host drives.
 * Locking sets hdr[].lock and waits for outstanding I/O to drain;
 * unlocking clears the flag and restarts request processing.
 * Invalid or absent drive numbers are silently skipped.
 */
static int ioc_lockdrv(void __user *arg)
{
    gdth_ioctl_lockdrv ldrv;
    u8 i, j;
    unsigned long flags;
    gdth_ha_str *ha;

    if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
        return -EFAULT;
    ha = gdth_find_ha(ldrv.ionode);
    if (!ha)
        return -EFAULT;

    for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) {
        j = ldrv.drives[i];
        if (j >= MAX_HDRIVES || !ha->hdr[j].present)
            continue;
        if (ldrv.lock) {
            spin_lock_irqsave(&ha->smp_lock, flags);
            ha->hdr[j].lock = 1;
            spin_unlock_irqrestore(&ha->smp_lock, flags);
            /* wait until no command for this drive is in flight */
            gdth_wait_completion(ha, ha->bus_cnt, j);
        } else {
            spin_lock_irqsave(&ha->smp_lock, flags);
            ha->hdr[j].lock = 0;
            spin_unlock_irqrestore(&ha->smp_lock, flags);
            gdth_next(ha);
        }
    }
    return 0;
}
3527
3528 static int ioc_resetdrv(void __user *arg, char *cmnd)
3529 {
3530 gdth_ioctl_reset res;
3531 gdth_cmd_str cmd;
3532 gdth_ha_str *ha;
3533 int rval;
3534
3535 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
3536 res.number >= MAX_HDRIVES)
3537 return -EFAULT;
3538 ha = gdth_find_ha(res.ionode);
3539 if (!ha)
3540 return -EFAULT;
3541
3542 if (!ha->hdr[res.number].present)
3543 return 0;
3544 memset(&cmd, 0, sizeof(gdth_cmd_str));
3545 cmd.Service = CACHESERVICE;
3546 cmd.OpCode = GDT_CLUST_RESET;
3547 if (ha->cache_feat & GDT_64BIT)
3548 cmd.u.cache64.DeviceNo = res.number;
3549 else
3550 cmd.u.cache.DeviceNo = res.number;
3551
3552 rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL);
3553 if (rval < 0)
3554 return rval;
3555 res.status = rval;
3556
3557 if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset)))
3558 return -EFAULT;
3559 return 0;
3560 }
3561
/*
 * Prepare the data-transfer part of a cache-service ioctl command.
 * User space always fills in the 32-bit layout; for 64-bit firmware the
 * relevant fields are first copied over to the cache64 layout.  With
 * scatter-gather support a single-element SG list pointing at 'paddr'
 * is built (DestAddr set to the "use SG" sentinel), otherwise 'paddr'
 * is used as the direct destination address.
 */
static void gdth_ioc_cacheservice(gdth_ha_str *ha, gdth_ioctl_general *gen,
        u64 paddr)
{
    if (ha->cache_feat & GDT_64BIT) {
        /* copy elements from 32-bit IOCTL structure */
        gen->command.u.cache64.BlockCnt = gen->command.u.cache.BlockCnt;
        gen->command.u.cache64.BlockNo = gen->command.u.cache.BlockNo;
        gen->command.u.cache64.DeviceNo = gen->command.u.cache.DeviceNo;

        if (ha->cache_feat & SCATTER_GATHER) {
            gen->command.u.cache64.DestAddr = (u64)-1;
            gen->command.u.cache64.sg_canz = 1;
            gen->command.u.cache64.sg_lst[0].sg_ptr = paddr;
            gen->command.u.cache64.sg_lst[0].sg_len = gen->data_len;
            gen->command.u.cache64.sg_lst[1].sg_len = 0;
        } else {
            gen->command.u.cache64.DestAddr = paddr;
            gen->command.u.cache64.sg_canz = 0;
        }
    } else {
        if (ha->cache_feat & SCATTER_GATHER) {
            gen->command.u.cache.DestAddr = 0xffffffff;
            gen->command.u.cache.sg_canz = 1;
            gen->command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
            gen->command.u.cache.sg_lst[0].sg_len = gen->data_len;
            gen->command.u.cache.sg_lst[1].sg_len = 0;
        } else {
            gen->command.u.cache.DestAddr = paddr;
            gen->command.u.cache.sg_canz = 0;
        }
    }
}
3594
/*
 * Prepare the data-transfer part of a raw-service ioctl command.
 * Mirrors gdth_ioc_cacheservice(): the 32-bit layout supplied by user
 * space is converted to the raw64 layout for 64-bit firmware (the CDB
 * is staged through a local copy because the two union layouts
 * overlap), then either a single-element SG list or a direct address
 * is set up.  Sense data is placed directly after the data buffer.
 */
static void gdth_ioc_scsiraw(gdth_ha_str *ha, gdth_ioctl_general *gen,
        u64 paddr)
{
    if (ha->raw_feat & GDT_64BIT) {
        /* copy elements from 32-bit IOCTL structure */
        char cmd[16];

        gen->command.u.raw64.sense_len = gen->command.u.raw.sense_len;
        gen->command.u.raw64.bus = gen->command.u.raw.bus;
        gen->command.u.raw64.lun = gen->command.u.raw.lun;
        gen->command.u.raw64.target = gen->command.u.raw.target;
        /* stage the CDB: raw and raw64 share storage in the union */
        memcpy(cmd, gen->command.u.raw.cmd, 16);
        memcpy(gen->command.u.raw64.cmd, cmd, 16);
        gen->command.u.raw64.clen = gen->command.u.raw.clen;
        gen->command.u.raw64.sdlen = gen->command.u.raw.sdlen;
        gen->command.u.raw64.direction = gen->command.u.raw.direction;

        /* addresses */
        if (ha->raw_feat & SCATTER_GATHER) {
            gen->command.u.raw64.sdata = (u64)-1;
            gen->command.u.raw64.sg_ranz = 1;
            gen->command.u.raw64.sg_lst[0].sg_ptr = paddr;
            gen->command.u.raw64.sg_lst[0].sg_len = gen->data_len;
            gen->command.u.raw64.sg_lst[1].sg_len = 0;
        } else {
            gen->command.u.raw64.sdata = paddr;
            gen->command.u.raw64.sg_ranz = 0;
        }

        gen->command.u.raw64.sense_data = paddr + gen->data_len;
    } else {
        if (ha->raw_feat & SCATTER_GATHER) {
            gen->command.u.raw.sdata = 0xffffffff;
            gen->command.u.raw.sg_ranz = 1;
            gen->command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
            gen->command.u.raw.sg_lst[0].sg_len = gen->data_len;
            gen->command.u.raw.sg_lst[1].sg_len = 0;
        } else {
            gen->command.u.raw.sdata = paddr;
            gen->command.u.raw.sg_ranz = 0;
        }

        gen->command.u.raw.sense_data = (u32)paddr + gen->data_len;
    }
}
3640
/*
 * GDTIOCTL_GENERAL handler: execute an arbitrary firmware command on
 * behalf of user space.  Data and sense buffers travel in one DMA-
 * coherent allocation that mirrors the bytes directly following the
 * gdth_ioctl_general header in the user buffer; on success both are
 * copied back, together with the header minus the command itself.
 */
static int ioc_general(void __user *arg, char *cmnd)
{
    gdth_ioctl_general gen;
    gdth_ha_str *ha;
    char *buf = NULL;
    dma_addr_t paddr;
    int rval;

    if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
        return -EFAULT;
    ha = gdth_find_ha(gen.ionode);
    if (!ha)
        return -EFAULT;

    /* reject lengths whose sum could overflow the allocation size */
    if (gen.data_len > INT_MAX)
        return -EINVAL;
    if (gen.sense_len > INT_MAX)
        return -EINVAL;
    if (gen.data_len + gen.sense_len > INT_MAX)
        return -EINVAL;

    if (gen.data_len + gen.sense_len > 0) {
        buf = dma_alloc_coherent(&ha->pdev->dev,
                gen.data_len + gen.sense_len, &paddr,
                GFP_KERNEL);
        if (!buf)
            return -EFAULT;

        rval = -EFAULT;
        if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
                gen.data_len + gen.sense_len))
            goto out_free_buf;

        /* patch the DMA address into the command, per service type */
        if (gen.command.OpCode == GDT_IOCTL)
            gen.command.u.ioctl.p_param = paddr;
        else if (gen.command.Service == CACHESERVICE)
            gdth_ioc_cacheservice(ha, &gen, paddr);
        else if (gen.command.Service == SCSIRAWSERVICE)
            gdth_ioc_scsiraw(ha, &gen, paddr);
        else
            goto out_free_buf;
    }

    rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout,
            &gen.info);
    if (rval < 0)
        goto out_free_buf;
    gen.status = rval;

    rval = -EFAULT;
    if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
            gen.data_len + gen.sense_len))
        goto out_free_buf;
    /* header goes back without the embedded command structure */
    if (copy_to_user(arg, &gen,
            sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str)))
        goto out_free_buf;

    rval = 0;
out_free_buf:
    if (buf)
        dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len,
                buf, paddr);
    return rval;
}
3705
/*
 * GDTIOCTL_HDRLIST handler: report bus/target/lun and cluster type for
 * every host drive; absent drives are flagged with bus 0xff.  For
 * drives that are part of a cluster the current cluster state is
 * refreshed from the firmware via GDT_CLUST_INFO.
 */
static int ioc_hdrlist(void __user *arg, char *cmnd)
{
    gdth_ioctl_rescan *rsc;
    gdth_cmd_str *cmd;
    gdth_ha_str *ha;
    u8 i;
    int rc = -ENOMEM;
    u32 cluster_type = 0;

    rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
    cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
    if (!rsc || !cmd)
        goto free_fail;

    if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
        (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
        rc = -EFAULT;
        goto free_fail;
    }
    memset(cmd, 0, sizeof(gdth_cmd_str));

    for (i = 0; i < MAX_HDRIVES; ++i) {
        if (!ha->hdr[i].present) {
            rsc->hdr_list[i].bus = 0xff;   /* marks "no drive here" */
            continue;
        }
        rsc->hdr_list[i].bus = ha->virt_bus;
        rsc->hdr_list[i].target = i;
        rsc->hdr_list[i].lun = 0;
        rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
        if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) {
            /* ask the firmware for the up-to-date cluster state */
            cmd->Service = CACHESERVICE;
            cmd->OpCode = GDT_CLUST_INFO;
            if (ha->cache_feat & GDT_64BIT)
                cmd->u.cache64.DeviceNo = i;
            else
                cmd->u.cache.DeviceNo = i;
            if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK)
                rsc->hdr_list[i].cluster_type = cluster_type;
        }
    }

    if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
        rc = -EFAULT;
    else
        rc = 0;

free_fail:
    kfree(rsc);
    kfree(cmd);
    return rc;
}
3758
/*
 * GDTIOCTL_RESCAN handler: re-probe host drives.
 *
 * flag == 0 re-initializes the whole cache service and scans every
 * reported host drive; otherwise only the single drive rsc->hdr_no is
 * probed.  For each drive the size (GDT_INFO), device type
 * (GDT_DEVTYPE), cluster state (GDT_CLUST_INFO) and R/W attributes
 * (GDT_RW_ATTRIBS) are queried and mirrored into ha->hdr[] under the
 * adapter lock.  The resulting list is copied back to user space.
 */
static int ioc_rescan(void __user *arg, char *cmnd)
{
    gdth_ioctl_rescan *rsc;
    gdth_cmd_str *cmd;
    u16 i, status, hdr_cnt;
    u32 info;
    int cyls, hds, secs;
    int rc = -ENOMEM;
    unsigned long flags;
    gdth_ha_str *ha;

    rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
    cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
    if (!cmd || !rsc)
        goto free_fail;

    if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
        (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
        rc = -EFAULT;
        goto free_fail;
    }
    memset(cmd, 0, sizeof(gdth_cmd_str));

    if (rsc->flag == 0) {
        /* old method: re-init. cache service */
        cmd->Service = CACHESERVICE;
        if (ha->cache_feat & GDT_64BIT) {
            cmd->OpCode = GDT_X_INIT_HOST;
            cmd->u.cache64.DeviceNo = LINUX_OS;
        } else {
            cmd->OpCode = GDT_INIT;
            cmd->u.cache.DeviceNo = LINUX_OS;
        }

        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
        i = 0;
        /* on success 'info' reports how many host drives exist */
        hdr_cnt = (status == S_OK ? (u16)info : 0);
    } else {
        /* rescan only the requested drive */
        i = rsc->hdr_no;
        hdr_cnt = i + 1;
    }

    for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) {
        /* drive size */
        cmd->Service = CACHESERVICE;
        cmd->OpCode = GDT_INFO;
        if (ha->cache_feat & GDT_64BIT)
            cmd->u.cache64.DeviceNo = i;
        else
            cmd->u.cache.DeviceNo = i;

        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);

        spin_lock_irqsave(&ha->smp_lock, flags);
        rsc->hdr_list[i].bus = ha->virt_bus;
        rsc->hdr_list[i].target = i;
        rsc->hdr_list[i].lun = 0;
        if (status != S_OK) {
            ha->hdr[i].present = FALSE;
        } else {
            ha->hdr[i].present = TRUE;
            ha->hdr[i].size = info;
            /* evaluate mapping */
            ha->hdr[i].size &= ~SECS32;
            gdth_eval_mapping(ha->hdr[i].size,&cyls,&hds,&secs);
            ha->hdr[i].heads = hds;
            ha->hdr[i].secs = secs;
            /* round size */
            ha->hdr[i].size = cyls * hds * secs;
        }
        spin_unlock_irqrestore(&ha->smp_lock, flags);
        if (status != S_OK)
            continue;

        /* extended info, if GDT_64BIT, for drives > 2 TB */
        /* but we need ha->info2, not yet stored in scp->SCp */

        /* devtype, cluster info, R/W attribs */
        cmd->Service = CACHESERVICE;
        cmd->OpCode = GDT_DEVTYPE;
        if (ha->cache_feat & GDT_64BIT)
            cmd->u.cache64.DeviceNo = i;
        else
            cmd->u.cache.DeviceNo = i;

        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);

        spin_lock_irqsave(&ha->smp_lock, flags);
        ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
        spin_unlock_irqrestore(&ha->smp_lock, flags);

        cmd->Service = CACHESERVICE;
        cmd->OpCode = GDT_CLUST_INFO;
        if (ha->cache_feat & GDT_64BIT)
            cmd->u.cache64.DeviceNo = i;
        else
            cmd->u.cache.DeviceNo = i;

        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);

        spin_lock_irqsave(&ha->smp_lock, flags);
        /* cluster info is only meaningful without shared access */
        ha->hdr[i].cluster_type =
            ((status == S_OK && !shared_access) ? (u16)info : 0);
        spin_unlock_irqrestore(&ha->smp_lock, flags);
        rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;

        cmd->Service = CACHESERVICE;
        cmd->OpCode = GDT_RW_ATTRIBS;
        if (ha->cache_feat & GDT_64BIT)
            cmd->u.cache64.DeviceNo = i;
        else
            cmd->u.cache.DeviceNo = i;

        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);

        spin_lock_irqsave(&ha->smp_lock, flags);
        ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    }

    if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
        rc = -EFAULT;
    else
        rc = 0;

free_fail:
    kfree(rsc);
    kfree(cmd);
    return rc;
}
3888
/*
 * Main ioctl dispatcher for the gdth character device.
 * Simple queries are answered inline; the heavier operations are
 * delegated to the ioc_* helpers above.  'cmnd' is a dummy SCSI CDB
 * (filled with 0xff) handed to helpers that execute firmware commands.
 * Runs under gdth_mutex (see gdth_unlocked_ioctl).
 */
static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
    gdth_ha_str *ha;
    struct scsi_cmnd *scp;
    unsigned long flags;
    char cmnd[MAX_COMMAND_SIZE];
    void __user *argp = (void __user *)arg;

    memset(cmnd, 0xff, 12);

    TRACE(("gdth_ioctl() cmd 0x%x\n", cmd));

    switch (cmd) {
      case GDTIOCTL_CTRCNT:
      {
        /* number of registered controllers */
        int cnt = gdth_ctr_count;
        if (put_user(cnt, (int __user *)argp))
            return -EFAULT;
        break;
      }

      case GDTIOCTL_DRVERS:
      {
        /* driver version, major in the high byte */
        int ver = (GDTH_VERSION<<8) | GDTH_SUBVERSION;
        if (put_user(ver, (int __user *)argp))
            return -EFAULT;
        break;
      }

      case GDTIOCTL_OSVERS:
      {
        /* kernel version split into version/subversion/revision */
        gdth_ioctl_osvers osv;

        osv.version = (u8)(LINUX_VERSION_CODE >> 16);
        osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
        osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
        if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
            return -EFAULT;
        break;
      }

      case GDTIOCTL_CTRTYPE:
      {
        /* controller type/subtype and PCI identification */
        gdth_ioctl_ctrtype ctrt;

        if (copy_from_user(&ctrt, argp, sizeof(gdth_ioctl_ctrtype)) ||
            (NULL == (ha = gdth_find_ha(ctrt.ionode))))
            return -EFAULT;

        if (ha->type != GDT_PCIMPR) {
            ctrt.type = (u8)((ha->stype<<4) + 6);
        } else {
            /* MPR boards: distinguish Intel vs. ICP OEM variants */
            ctrt.type = (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
            if (ha->stype >= 0x300)
                ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
            else
                ctrt.ext_type = 0x6000 | ha->stype;
        }
        ctrt.device_id = ha->pdev->device;
        ctrt.sub_device_id = ha->pdev->subsystem_device;
        ctrt.info = ha->brd_phys;
        ctrt.oem_id = ha->oem_id;
        if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype)))
            return -EFAULT;
        break;
      }

      case GDTIOCTL_GENERAL:
        return ioc_general(argp, cmnd);

      case GDTIOCTL_EVENT:
        return ioc_event(argp);

      case GDTIOCTL_LOCKDRV:
        return ioc_lockdrv(argp);

      case GDTIOCTL_LOCKCHN:
      {
        /* lock/unlock a raw SCSI channel */
        gdth_ioctl_lockchn lchn;
        u8 i, j;

        if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
            (NULL == (ha = gdth_find_ha(lchn.ionode))))
            return -EFAULT;

        i = lchn.channel;
        if (i < ha->bus_cnt) {
            if (lchn.lock) {
                spin_lock_irqsave(&ha->smp_lock, flags);
                ha->raw[i].lock = 1;
                spin_unlock_irqrestore(&ha->smp_lock, flags);
                /* drain outstanding I/O for every target on the bus */
                for (j = 0; j < ha->tid_cnt; ++j)
                    gdth_wait_completion(ha, i, j);
            } else {
                spin_lock_irqsave(&ha->smp_lock, flags);
                ha->raw[i].lock = 0;
                spin_unlock_irqrestore(&ha->smp_lock, flags);
                for (j = 0; j < ha->tid_cnt; ++j)
                    gdth_next(ha);
            }
        }
        break;
      }

      case GDTIOCTL_RESCAN:
        return ioc_rescan(argp, cmnd);

      case GDTIOCTL_HDRLIST:
        return ioc_hdrlist(argp, cmnd);

      case GDTIOCTL_RESET_BUS:
      {
        /* build a minimal scsi_cmnd so gdth_eh_bus_reset() can be reused */
        gdth_ioctl_reset res;
        int rval;

        if (copy_from_user(&res, argp, sizeof(gdth_ioctl_reset)) ||
            (NULL == (ha = gdth_find_ha(res.ionode))))
            return -EFAULT;

        scp = kzalloc(sizeof(*scp), GFP_KERNEL);
        if (!scp)
            return -ENOMEM;
        scp->device = ha->sdev;
        scp->cmd_len = 12;
        scp->device->channel = res.number;
        rval = gdth_eh_bus_reset(scp);
        res.status = (rval == SUCCESS ? S_OK : S_GENERR);
        kfree(scp);

        if (copy_to_user(argp, &res, sizeof(gdth_ioctl_reset)))
            return -EFAULT;
        break;
      }

      case GDTIOCTL_RESET_DRV:
        return ioc_resetdrv(argp, cmnd);

      default:
        break;
    }
    return 0;
}
4031
4032 static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd,
4033 unsigned long arg)
4034 {
4035 int ret;
4036
4037 mutex_lock(&gdth_mutex);
4038 ret = gdth_ioctl(file, cmd, arg);
4039 mutex_unlock(&gdth_mutex);
4040
4041 return ret;
4042 }
4043
4044 /* flush routine */
4045 static void gdth_flush(gdth_ha_str *ha)
4046 {
4047 int i;
4048 gdth_cmd_str gdtcmd;
4049 char cmnd[MAX_COMMAND_SIZE];
4050 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
4051
4052 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
4053
4054 for (i = 0; i < MAX_HDRIVES; ++i) {
4055 if (ha->hdr[i].present) {
4056 gdtcmd.BoardNode = LOCALBOARD;
4057 gdtcmd.Service = CACHESERVICE;
4058 gdtcmd.OpCode = GDT_FLUSH;
4059 if (ha->cache_feat & GDT_64BIT) {
4060 gdtcmd.u.cache64.DeviceNo = i;
4061 gdtcmd.u.cache64.BlockNo = 1;
4062 gdtcmd.u.cache64.sg_canz = 0;
4063 } else {
4064 gdtcmd.u.cache.DeviceNo = i;
4065 gdtcmd.u.cache.BlockNo = 1;
4066 gdtcmd.u.cache.sg_canz = 0;
4067 }
4068 TRACE2(("gdth_flush(): flush ha %d drive %d\n", ha->hanum, i));
4069
4070 gdth_execute(ha->shost, &gdtcmd, cmnd, 30, NULL);
4071 }
4072 }
4073 }
4074
4075 /* configure lun */
4076 static int gdth_slave_configure(struct scsi_device *sdev)
4077 {
4078 sdev->skip_ms_page_3f = 1;
4079 sdev->skip_ms_page_8 = 1;
4080 return 0;
4081 }
4082
/* SCSI host template: describes this driver's entry points and limits
 * (queue depth, scatter/gather size, commands per LUN — all GDTH_*
 * constants come from gdth.h) to the SCSI midlayer. */
static struct scsi_host_template gdth_template = {
        .name                   = "GDT SCSI Disk Array Controller",
        .info                   = gdth_info,
        .queuecommand           = gdth_queuecommand,
        .eh_bus_reset_handler   = gdth_eh_bus_reset,
        .slave_configure        = gdth_slave_configure,
        .bios_param             = gdth_bios_param,
        .show_info              = gdth_show_info,
        .write_info             = gdth_set_info,
        .eh_timed_out           = gdth_timed_out,
        .proc_name              = "gdth",
        .can_queue              = GDTH_MAXCMDS,
        .this_id                = -1,
        .sg_tablesize           = GDTH_MAXSG,
        .cmd_per_lun            = GDTH_MAXC_P_L,
        .unchecked_isa_dma      = 1,    /* NOTE(review): probe clears this per-host; confirm intent */
        .no_write_same          = 1,
};
4101
/*
 * gdth_pci_probe_one - bring up one GDT PCI controller
 * @pcistr: probe data for this controller (wraps the struct pci_dev)
 * @ha_out: on success, receives the new per-controller state
 *
 * Allocates a Scsi_Host whose hostdata is the gdth_ha_str, initializes
 * the hardware, requests the IRQ, allocates DMA-coherent scratch and
 * message buffers, scans the attached drives, selects the DMA mask,
 * and finally registers and scans the SCSI host.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released through the goto cleanup chain below.
 */
static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
{
	struct Scsi_Host *shp;
	gdth_ha_str *ha;
	dma_addr_t scratch_dma_handle = 0;
	int error, i;
	struct pci_dev *pdev = pcistr->pdev;

	*ha_out = NULL;

	shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
	if (!shp)
		return -ENOMEM;
	ha = shost_priv(shp);

	error = -ENODEV;
	if (!gdth_init_pci(pdev, pcistr, ha))
		goto out_host_put;

	/* controller found and initialized */
	printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
		pdev->bus->number,
		PCI_SLOT(pdev->devfn),
		ha->irq);

	error = request_irq(ha->irq, gdth_interrupt,
				IRQF_SHARED, "gdth", ha);
	if (error) {
		printk("GDT-PCI: Unable to allocate IRQ\n");
		goto out_host_put;
	}

	shp->unchecked_isa_dma = 0;	/* PCI: override the template default */
	shp->irq = ha->irq;
	shp->dma_channel = 0xff;	/* no ISA DMA channel */

	ha->hanum = gdth_ctr_count++;
	ha->shost = shp;

	ha->pccb = &ha->cmdext;
	ha->ccb_phys = 0L;

	error = -ENOMEM;

	/* DMA-coherent scratch buffer shared with the firmware */
	ha->pscratch = dma_alloc_coherent(&ha->pdev->dev, GDTH_SCRATCH,
				&scratch_dma_handle, GFP_KERNEL);
	if (!ha->pscratch)
		goto out_free_irq;
	ha->scratch_phys = scratch_dma_handle;

	/* DMA-coherent buffer for firmware messages */
	ha->pmsg = dma_alloc_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
				&scratch_dma_handle, GFP_KERNEL);
	if (!ha->pmsg)
		goto out_free_pscratch;
	ha->msg_phys = scratch_dma_handle;

	ha->scratch_busy = FALSE;
	ha->req_first = NULL;
	/* newer boards (device id >= 0x200) support the full target count */
	ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
	if (max_ids > 0 && max_ids < ha->tid_cnt)
		ha->tid_cnt = max_ids;	/* honor max_ids= module parameter */
	for (i = 0; i < GDTH_MAXCMDS; ++i)
		ha->cmd_tab[i].cmnd = UNUSED_CMND;
	ha->scan_mode = rescan ? 0x10 : 0;

	error = -ENODEV;
	if (!gdth_search_drives(ha)) {
		printk("GDT-PCI %d: Error during device scan\n", ha->hanum);
		goto out_free_pmsg;
	}

	/* place the virtual (host-drive) bus after the raw SCSI buses,
	 * unless hdr_channel= selected a valid channel explicitly */
	if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
		hdr_channel = ha->bus_cnt;
	ha->virt_bus = hdr_channel;

	/* 64-bit DMA only supported from FW >= x.43 */
	if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) ||
	    !ha->dma64_support) {
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			printk(KERN_WARNING "GDT-PCI %d: "
				"Unable to set 32-bit DMA\n", ha->hanum);
			goto out_free_pmsg;
		}
	} else {
		shp->max_cmd_len = 16;	/* 64-bit path uses 16-byte CDBs */
		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
			printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
		} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			/* fall back to 32-bit; give up only if that fails too */
			printk(KERN_WARNING "GDT-PCI %d: "
				"Unable to set 64/32-bit DMA\n", ha->hanum);
			goto out_free_pmsg;
		}
	}

	shp->max_id = ha->tid_cnt;
	shp->max_lun = MAXLUN;
	shp->max_channel = ha->bus_cnt;	/* buses 0..bus_cnt incl. virtual bus */

	spin_lock_init(&ha->smp_lock);
	gdth_enable_int(ha);

	error = scsi_add_host(shp, &pdev->dev);
	if (error)
		goto out_free_pmsg;
	list_add_tail(&ha->list, &gdth_instances);

	pci_set_drvdata(ha->pdev, ha);
	gdth_timer_init();

	scsi_scan_host(shp);

	*ha_out = ha;

	return 0;

 out_free_pmsg:
	dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
				ha->pmsg, ha->msg_phys);
 out_free_pscratch:
	dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
				ha->pscratch, ha->scratch_phys);
 out_free_irq:
	free_irq(ha->irq, ha);
	gdth_ctr_count--;
 out_host_put:
	scsi_host_put(shp);
	return error;
}
4230
/*
 * gdth_remove_one - tear down one controller instance
 * @ha: controller to remove
 *
 * Unregisters the SCSI host, flushes the controller's write cache,
 * then releases the host device, IRQ, DMA buffers and finally the
 * Scsi_Host itself.  The order matters: the host must be removed
 * (no new commands) before the cache flush and resource teardown.
 */
static void gdth_remove_one(gdth_ha_str *ha)
{
	struct Scsi_Host *shp = ha->shost;

	TRACE2(("gdth_remove_one()\n"));

	scsi_remove_host(shp);

	/* commit any dirty cache data before pulling resources */
	gdth_flush(ha);

	if (ha->sdev) {
		scsi_free_host_dev(ha->sdev);
		ha->sdev = NULL;
	}

	if (shp->irq)
		free_irq(shp->irq,ha);

	if (ha->pscratch)
		dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
			ha->pscratch, ha->scratch_phys);
	if (ha->pmsg)
		dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
			ha->pmsg, ha->msg_phys);
	if (ha->ccb_phys)
		dma_unmap_single(&ha->pdev->dev, ha->ccb_phys,
			sizeof(gdth_cmd_str), DMA_BIDIRECTIONAL);

	/* drops the last reference; frees shp and the embedded ha */
	scsi_host_put(shp);
}
4261
4262 static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
4263 {
4264 gdth_ha_str *ha;
4265
4266 TRACE2(("gdth_halt() event %d\n", (int)event));
4267 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
4268 return NOTIFY_DONE;
4269
4270 list_for_each_entry(ha, &gdth_instances, list)
4271 gdth_flush(ha);
4272
4273 return NOTIFY_OK;
4274 }
4275
4276 static struct notifier_block gdth_notifier = {
4277 gdth_halt, NULL, 0
4278 };
4279
4280 static int __init gdth_init(void)
4281 {
4282 if (disable) {
4283 printk("GDT-HA: Controller driver disabled from"
4284 " command line !\n");
4285 return 0;
4286 }
4287
4288 printk("GDT-HA: Storage RAID Controller Driver. Version: %s\n",
4289 GDTH_VERSION_STR);
4290
4291 /* initializations */
4292 gdth_polling = TRUE;
4293 gdth_clear_events();
4294 timer_setup(&gdth_timer, gdth_timeout, 0);
4295
4296 /* scanning for PCI controllers */
4297 if (pci_register_driver(&gdth_pci_driver)) {
4298 gdth_ha_str *ha;
4299
4300 list_for_each_entry(ha, &gdth_instances, list)
4301 gdth_remove_one(ha);
4302 return -ENODEV;
4303 }
4304
4305 TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
4306
4307 major = register_chrdev(0,"gdth", &gdth_fops);
4308 register_reboot_notifier(&gdth_notifier);
4309 gdth_polling = FALSE;
4310 return 0;
4311 }
4312
4313 static void __exit gdth_exit(void)
4314 {
4315 gdth_ha_str *ha;
4316
4317 unregister_chrdev(major, "gdth");
4318 unregister_reboot_notifier(&gdth_notifier);
4319
4320 #ifdef GDTH_STATISTICS
4321 del_timer_sync(&gdth_timer);
4322 #endif
4323
4324 pci_unregister_driver(&gdth_pci_driver);
4325
4326 list_for_each_entry(ha, &gdth_instances, list)
4327 gdth_remove_one(ha);
4328 }
4329
/* Register the driver's init/exit entry points with the module loader. */
module_init(gdth_init);
module_exit(gdth_exit);

/* When built into the kernel (not a module), parse "gdth=..." options
 * from the kernel command line via option_setup(). */
#ifndef MODULE
__setup("gdth=", option_setup);
#endif