/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is:
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *		 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  aachba.c
 *
 * Abstract: Contains interfaces to manage I/Os.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

/* values for inqd_pdt: Peripheral device type in plain English */
#define INQD_PDT_DA	0x00	/* Direct-access (DISK) device */
#define INQD_PDT_PROC	0x03	/* Processor device */
#define INQD_PDT_CHNGR	0x08	/* Changer (jukebox, scsi2) */
#define INQD_PDT_COMM	0x09	/* Communication device (scsi2) */
#define INQD_PDT_NOLUN2	0x1f	/* Unknown Device (scsi2) */
#define INQD_PDT_NOLUN	0x7f	/* Logical Unit Not Present */

#define INQD_PDT_DMASK	0x1F	/* Peripheral Device Type Mask */
#define INQD_PDT_QMASK	0xE0	/* Peripheral Device Qualifier Mask */

/*
 *	Sense codes
 */

#define SENCODE_NO_SENSE			0x00
#define SENCODE_END_OF_DATA			0x00
#define SENCODE_BECOMING_READY			0x04
#define SENCODE_INIT_CMD_REQUIRED		0x04
#define SENCODE_UNRECOVERED_READ_ERROR		0x11
#define SENCODE_PARAM_LIST_LENGTH_ERROR		0x1A
#define SENCODE_INVALID_COMMAND			0x20
#define SENCODE_LBA_OUT_OF_RANGE		0x21
#define SENCODE_INVALID_CDB_FIELD		0x24
#define SENCODE_LUN_NOT_SUPPORTED		0x25
#define SENCODE_INVALID_PARAM_FIELD		0x26
#define SENCODE_PARAM_NOT_SUPPORTED		0x26
#define SENCODE_PARAM_VALUE_INVALID		0x26
#define SENCODE_RESET_OCCURRED			0x29
#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x3E
#define SENCODE_INQUIRY_DATA_CHANGED		0x3F
#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x39
#define SENCODE_DIAGNOSTIC_FAILURE		0x40
#define SENCODE_INTERNAL_TARGET_FAILURE		0x44
#define SENCODE_INVALID_MESSAGE_ERROR		0x49
#define SENCODE_LUN_FAILED_SELF_CONFIG		0x4c
#define SENCODE_OVERLAPPED_COMMAND		0x4E

/*
 *	Additional sense codes
 */

#define ASENCODE_NO_SENSE			0x00
#define ASENCODE_END_OF_DATA			0x05
#define ASENCODE_BECOMING_READY			0x01
#define ASENCODE_INIT_CMD_REQUIRED		0x02
#define ASENCODE_PARAM_LIST_LENGTH_ERROR	0x00
#define ASENCODE_INVALID_COMMAND		0x00
#define ASENCODE_LBA_OUT_OF_RANGE		0x00
#define ASENCODE_INVALID_CDB_FIELD		0x00
#define ASENCODE_LUN_NOT_SUPPORTED		0x00
#define ASENCODE_INVALID_PARAM_FIELD		0x00
#define ASENCODE_PARAM_NOT_SUPPORTED		0x01
#define ASENCODE_PARAM_VALUE_INVALID		0x02
#define ASENCODE_RESET_OCCURRED			0x00
#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x00
#define ASENCODE_INQUIRY_DATA_CHANGED		0x03
#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x00
#define ASENCODE_DIAGNOSTIC_FAILURE		0x80
#define ASENCODE_INTERNAL_TARGET_FAILURE	0x00
#define ASENCODE_INVALID_MESSAGE_ERROR		0x00
#define ASENCODE_LUN_FAILED_SELF_CONFIG		0x00
#define ASENCODE_OVERLAPPED_COMMAND		0x00
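
/*
 * Each SENCODE / ASENCODE pair above is a SCSI additional sense code (ASC)
 * and additional sense code qualifier (ASCQ); set_sense() below stores them
 * in bytes 12 and 13 of the fixed-format sense buffer.
 */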

#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)

#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
#define BYTE3(x) (unsigned char)((x) >> 24)
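
/* For example, BYTE2(0x00A1B2C3) selects bits 23:16 and yields 0xA1. */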

/* MODE_SENSE data format */
typedef struct {
	struct {
		u8 data_length;
		u8 med_type;
		u8 dev_par;
		u8 bd_length;
	} __attribute__((packed)) hd;
	struct {
		u8 dens_code;
		u8 block_count[3];
		u8 reserved;
		u8 block_length[3];
	} __attribute__((packed)) bd;
	u8 mpc_buf[3];
} __attribute__((packed)) aac_modep_data;

/* MODE_SENSE_10 data format */
typedef struct {
	struct {
		u8 data_length[2];
		u8 med_type;
		u8 dev_par;
		u8 rsrvd[2];
		u8 bd_length[2];
	} __attribute__((packed)) hd;
	struct {
		u8 dens_code;
		u8 block_count[3];
		u8 reserved;
		u8 block_length[3];
	} __attribute__((packed)) bd;
	u8 mpc_buf[3];
} __attribute__((packed)) aac_modep10_data;
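
/*
 * Both typedefs above follow the SPC mode parameter layout: a mode parameter
 * header (4 bytes for MODE SENSE, 8 bytes for MODE SENSE(10)), one 8-byte
 * short block descriptor, and a small buffer for the mode page bytes, which
 * is why every sub-structure is packed.
 */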

/*------------------------------------------------------------------------------
 *		S T R U C T S / T Y P E D E F S
 *----------------------------------------------------------------------------*/
/* SCSI inquiry data */
struct inquiry_data {
	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type */
	u8 inqd_dtq;	/* RMB | Device Type Qualifier */
	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
	u8 inqd_len;	/* Additional length (n-4) */
	u8 inqd_pad1[2];/* Reserved - must be zero */
	u8 inqd_pad2;	/* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
	u8 inqd_vid[8];	/* Vendor ID */
	u8 inqd_pid[16];/* Product ID */
	u8 inqd_prl[4];	/* Product Revision Level */
};

/* Added for VPD 0x83 */
struct tvpd_id_descriptor_type_1 {
	u8 codeset:4;		/* VPD_CODE_SET */
	u8 reserved:4;
	u8 identifiertype:4;	/* VPD_IDENTIFIER_TYPE */
	u8 reserved2:4;
	u8 reserved3;
	u8 identifierlength;
	u8 venid[8];
	u8 productid[16];
	u8 serialnumber[8];	/* SN in ASCII */
};

struct tvpd_id_descriptor_type_2 {
	u8 codeset:4;		/* VPD_CODE_SET */
	u8 reserved:4;
	u8 identifiertype:4;	/* VPD_IDENTIFIER_TYPE */
	u8 reserved2:4;
	u8 reserved3;
	u8 identifierlength;
	struct teu64id {
		u32 Serial;
		/* The serial number is supposed to be 40 bits,
		 * but we only support 32, so make the last byte zero. */
		u8 reserved;
		u8 venid[3];
	} eu64id;
};

struct tvpd_id_descriptor_type_3 {
	u8 codeset:4;		/* VPD_CODE_SET */
	u8 reserved:4;
	u8 identifiertype:4;	/* VPD_IDENTIFIER_TYPE */
	u8 reserved2:4;
	u8 reserved3;
	u8 identifierlength;
	u8 Identifier[16];
};

struct tvpd_page83 {
	u8 DeviceType:5;
	u8 DeviceTypeQualifier:3;
	u8 PageCode;
	u8 reserved;
	u8 PageLength;
	struct tvpd_id_descriptor_type_1 type1;
	struct tvpd_id_descriptor_type_2 type2;
	struct tvpd_id_descriptor_type_3 type3;
};

/*
 *		M O D U L E   G L O B A L S
 */

static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
				struct aac_raw_io2 *rio2, int sg_max);
static long aac_build_sghba(struct scsi_cmnd *scsicmd,
				struct aac_hba_cmd_req *hbacmd,
				int sg_max, u64 sg_address);
static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
				int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd *scsicmd);
static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
#endif

/*
 *	Non-DASD selection is handled entirely in aachba now
 */

static int nondasd = -1;
static int aac_cache = 2;	/* WCE=0 to avoid performance problems */
static int dacmode = -1;
int aac_msi;
int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
int aac_sync_mode;		/* Only Sync. transfer - disabled */
int aac_convert_sgl = 1;	/* convert non-conformable s/g list - enabled */

module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
	" 0=off, 1=on");
module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
	" 0=off, 1=on");
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
	" 0=off, 1=on");
module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
	"\tbit 0 - Disable FUA in WRITE SCSI commands\n"
	"\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
	"\tbit 2 - Disable only if Battery is protecting Cache");
module_param(dacmode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
	" 0=off, 1=on");
module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
	" adapter for foreign arrays.\n"
	"This is typically needed in systems that do not have a BIOS."
	" 0=off, 1=on");
module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(msi, "IRQ handling."
	" 0=PIC(default), 1=MSI, 2=MSI-X)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
	" adapter to have its kernel up and\n"
	"running. This is typically adjusted for large systems that do not"
	" have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
	" applications to pick up AIFs before\n"
	"deregistering them. This is typically adjusted for heavily burdened"
	" systems.");

int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
	" blocks (FIB) allocated. Valid values are 512 and down. Default is"
	" to use suggestion from Firmware.");

int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
	" size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
	" suggestion from Firmware.");

int update_interval = 30 * 60;
module_param(update_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
	" updates issued to adapter.");

int check_interval = 24 * 60 * 60;
module_param(check_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
	" checks.");

int aac_check_reset = 1;
module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
	" adapter. A value of -1 forces the reset to adapters programmed to"
	" ignore it.");

int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
	" -1=protect 0=off, 1=on");

int aac_reset_devices;
module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");

int aac_wwn = 1;
module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
	"\t0 - Disable\n"
	"\t1 - Array Meta Data Signature (default)\n"
	"\t2 - Adapter Serial Number");


static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
		struct fib *fibptr) {
	struct scsi_device *device;

	if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
		dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
		aac_fib_complete(fibptr);
		return 0;
	}
	scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
	device = scsicmd->device;
	if (unlikely(!device)) {
		dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
		aac_fib_complete(fibptr);
		return 0;
	}
	return 1;
}

/**
 *	aac_get_config_status	-	check the adapter configuration
 *	@dev: adapter to query
 *	@commit_flag: force commit of the configuration
 *
 *	Query config status, and commit the configuration if needed.
 */
int aac_get_config_status(struct aac_dev *dev, int commit_flag)
{
	int status = 0;
	struct fib *fibptr;

	if (!(fibptr = aac_fib_alloc(dev)))
		return -ENOMEM;

	aac_fib_init(fibptr);
	{
		struct aac_get_config_status *dinfo;
		dinfo = (struct aac_get_config_status *) fib_data(fibptr);

		dinfo->command = cpu_to_le32(VM_ContainerConfig);
		dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
		dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
	}

	status = aac_fib_send(ContainerCommand,
			    fibptr,
			    sizeof (struct aac_get_config_status),
			    FsaNormal,
			    1, 1,
			    NULL, NULL);
	if (status < 0) {
		printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
	} else {
		struct aac_get_config_status_resp *reply
		  = (struct aac_get_config_status_resp *) fib_data(fibptr);
		dprintk((KERN_WARNING
		  "aac_get_config_status: response=%d status=%d action=%d\n",
		  le32_to_cpu(reply->response),
		  le32_to_cpu(reply->status),
		  le32_to_cpu(reply->data.action)));
		if ((le32_to_cpu(reply->response) != ST_OK) ||
		     (le32_to_cpu(reply->status) != CT_OK) ||
		     (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
			printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
			status = -EINVAL;
		}
	}
	/* Do not set XferState to zero unless we receive a response from F/W */
	if (status >= 0)
		aac_fib_complete(fibptr);

	/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
	if (status >= 0) {
		if ((aac_commit == 1) || commit_flag) {
			struct aac_commit_config *dinfo;
			aac_fib_init(fibptr);
			dinfo = (struct aac_commit_config *) fib_data(fibptr);

			dinfo->command = cpu_to_le32(VM_ContainerConfig);
			dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);

			status = aac_fib_send(ContainerCommand,
				    fibptr,
				    sizeof (struct aac_commit_config),
				    FsaNormal,
				    1, 1,
				    NULL, NULL);
			/* Do not set XferState to zero unless
			 * we receive a response from F/W */
			if (status >= 0)
				aac_fib_complete(fibptr);
		} else if (aac_commit == 0) {
			printk(KERN_WARNING
			  "aac_get_config_status: Foreign device configurations are being ignored\n");
		}
	}
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return status;
}

static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
{
	char inq_data;
	scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
	if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
		inq_data &= 0xdf;
		scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
	}
}
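
/*
 * Byte 0 of the INQUIRY response carries the peripheral qualifier in bits
 * 7:5; clearing bit 5 above converts qualifier 001b (supported but not
 * currently connected) to 000b, so the physical disk is reported as present
 * to the SCSI midlayer.
 */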

/**
 *	aac_get_containers	-	list containers
 *	@dev: adapter to probe
 *
 *	Make a list of all containers on this controller
 */
int aac_get_containers(struct aac_dev *dev)
{
	struct fsa_dev_info *fsa_dev_ptr;
	u32 index;
	int status = 0;
	struct fib *fibptr;
	struct aac_get_container_count *dinfo;
	struct aac_get_container_count_resp *dresp;
	int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;

	if (!(fibptr = aac_fib_alloc(dev)))
		return -ENOMEM;

	aac_fib_init(fibptr);
	dinfo = (struct aac_get_container_count *) fib_data(fibptr);
	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);

	status = aac_fib_send(ContainerCommand,
		    fibptr,
		    sizeof (struct aac_get_container_count),
		    FsaNormal,
		    1, 1,
		    NULL, NULL);
	if (status >= 0) {
		dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
		maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
		if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_SUPPORTED_240_VOLUMES) {
			maximum_num_containers =
				le32_to_cpu(dresp->MaxSimpleVolumes);
		}
		aac_fib_complete(fibptr);
	}
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibptr);

	if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
		maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
	if (dev->fsa_dev == NULL ||
		dev->maximum_num_containers != maximum_num_containers) {

		fsa_dev_ptr = dev->fsa_dev;

		dev->fsa_dev = kcalloc(maximum_num_containers,
					sizeof(*fsa_dev_ptr), GFP_KERNEL);

		kfree(fsa_dev_ptr);
		fsa_dev_ptr = NULL;


		if (!dev->fsa_dev)
			return -ENOMEM;

		dev->maximum_num_containers = maximum_num_containers;
	}
	for (index = 0; index < dev->maximum_num_containers; index++) {
		dev->fsa_dev[index].devname[0] = '\0';
		dev->fsa_dev[index].valid = 0;

		status = aac_probe_container(dev, index);

		if (status < 0) {
			printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
			break;
		}
	}
	return status;
}

static void get_container_name_callback(void *context, struct fib *fibptr)
{
	struct aac_get_name_resp *get_name_reply;
	struct scsi_cmnd *scsicmd;

	scsicmd = (struct scsi_cmnd *) context;

	if (!aac_valid_context(scsicmd, fibptr))
		return;

	dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
	BUG_ON(fibptr == NULL);

	get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
	/* Failure is irrelevant, using default value instead */
	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
	 && (get_name_reply->data[0] != '\0')) {
		char *sp = get_name_reply->data;
		sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
		while (*sp == ' ')
			++sp;
		if (*sp) {
			struct inquiry_data inq;
			char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
			int count = sizeof(d);
			char *dp = d;
			do {
				*dp++ = (*sp) ? *sp++ : ' ';
			} while (--count > 0);

			scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
			memcpy(inq.inqd_pid, d, sizeof(d));
			scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
		}
	}

	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

	aac_fib_complete(fibptr);
	scsicmd->scsi_done(scsicmd);
}

/**
 *	aac_get_container_name	-	get container name, non-blocking.
 */
static int aac_get_container_name(struct scsi_cmnd *scsicmd)
{
	int status;
	struct aac_get_name *dinfo;
	struct fib *cmd_fibcontext;
	struct aac_dev *dev;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;

	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);

	aac_fib_init(cmd_fibcontext);
	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);

	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_READ_NAME);
	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
	dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));

	status = aac_fib_send(ContainerCommand,
		  cmd_fibcontext,
		  sizeof(struct aac_get_name_resp),
		  FsaNormal,
		  0, 1,
		  (fib_callback)get_container_name_callback,
		  (void *) scsicmd);

	/*
	 *	Check that the command was queued to the controller
	 */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
	aac_fib_complete(cmd_fibcontext);
	return -1;
}

static int aac_probe_container_callback2(struct scsi_cmnd *scsicmd)
{
	struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;

	if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
		return aac_scsi_cmd(scsicmd);

	scsicmd->result = DID_NO_CONNECT << 16;
	scsicmd->scsi_done(scsicmd);
	return 0;
}

static void _aac_probe_container2(void *context, struct fib *fibptr)
{
	struct fsa_dev_info *fsa_dev_ptr;
	int (*callback)(struct scsi_cmnd *);
	struct scsi_cmnd *scsicmd = (struct scsi_cmnd *)context;
	int i;


	if (!aac_valid_context(scsicmd, fibptr))
		return;

	scsicmd->SCp.Status = 0;
	fsa_dev_ptr = fibptr->dev->fsa_dev;
	if (fsa_dev_ptr) {
		struct aac_mount *dresp = (struct aac_mount *) fib_data(fibptr);
		fsa_dev_ptr += scmd_id(scsicmd);

		if ((le32_to_cpu(dresp->status) == ST_OK) &&
		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
			if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
			    AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
				dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
				fsa_dev_ptr->block_size = 0x200;
			} else {
				fsa_dev_ptr->block_size =
					le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
			}
			for (i = 0; i < 16; i++)
				fsa_dev_ptr->identifier[i] =
					dresp->mnt[0].fileinfo.bdevinfo
						.identifier[i];
			fsa_dev_ptr->valid = 1;
			/* sense_key holds the current state of the spin-up */
			if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
				fsa_dev_ptr->sense_data.sense_key = NOT_READY;
			else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
				fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
			fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
			fsa_dev_ptr->size
			  = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
			    (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
			fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
		}
		if ((fsa_dev_ptr->valid & 1) == 0)
			fsa_dev_ptr->valid = 0;
		scsicmd->SCp.Status = le32_to_cpu(dresp->count);
	}
	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
	scsicmd->SCp.ptr = NULL;
	(*callback)(scsicmd);
	return;
}

static void _aac_probe_container1(void *context, struct fib *fibptr)
{
	struct scsi_cmnd *scsicmd;
	struct aac_mount *dresp;
	struct aac_query_mount *dinfo;
	int status;

	dresp = (struct aac_mount *) fib_data(fibptr);
	if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_VARIABLE_BLOCK_SIZE))
		dresp->mnt[0].capacityhigh = 0;
	if ((le32_to_cpu(dresp->status) != ST_OK) ||
	    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
		_aac_probe_container2(context, fibptr);
		return;
	}
	scsicmd = (struct scsi_cmnd *) context;

	if (!aac_valid_context(scsicmd, fibptr))
		return;

	aac_fib_init(fibptr);

	dinfo = (struct aac_query_mount *)fib_data(fibptr);

	if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_VARIABLE_BLOCK_SIZE)
		dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
	else
		dinfo->command = cpu_to_le32(VM_NameServe64);

	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
	dinfo->type = cpu_to_le32(FT_FILESYS);

	status = aac_fib_send(ContainerCommand,
			  fibptr,
			  sizeof(struct aac_query_mount),
			  FsaNormal,
			  0, 1,
			  _aac_probe_container2,
			  (void *) scsicmd);
	/*
	 *	Check that the command was queued to the controller
	 */
	if (status == -EINPROGRESS)
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
	else if (status < 0) {
		/* Inherit results from VM_NameServe, if any */
		dresp->status = cpu_to_le32(ST_OK);
		_aac_probe_container2(context, fibptr);
	}
}

static int _aac_probe_container(struct scsi_cmnd *scsicmd, int (*callback)(struct scsi_cmnd *))
{
	struct fib *fibptr;
	int status = -ENOMEM;

	if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
		struct aac_query_mount *dinfo;

		aac_fib_init(fibptr);

		dinfo = (struct aac_query_mount *)fib_data(fibptr);

		if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_VARIABLE_BLOCK_SIZE)
			dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
		else
			dinfo->command = cpu_to_le32(VM_NameServe);

		dinfo->count = cpu_to_le32(scmd_id(scsicmd));
		dinfo->type = cpu_to_le32(FT_FILESYS);
		scsicmd->SCp.ptr = (char *)callback;

		status = aac_fib_send(ContainerCommand,
			  fibptr,
			  sizeof(struct aac_query_mount),
			  FsaNormal,
			  0, 1,
			  _aac_probe_container1,
			  (void *) scsicmd);
		/*
		 *	Check that the command was queued to the controller
		 */
		if (status == -EINPROGRESS) {
			scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
			return 0;
		}
		if (status < 0) {
			scsicmd->SCp.ptr = NULL;
			aac_fib_complete(fibptr);
			aac_fib_free(fibptr);
		}
	}
	if (status < 0) {
		struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
		if (fsa_dev_ptr) {
			fsa_dev_ptr += scmd_id(scsicmd);
			if ((fsa_dev_ptr->valid & 1) == 0) {
				fsa_dev_ptr->valid = 0;
				return (*callback)(scsicmd);
			}
		}
	}
	return status;
}

/**
 *	aac_probe_container	-	query a logical volume
 *	@dev: device to query
 *	@cid: container identifier
 *
 *	Queries the controller about the given volume. The volume information
 *	is updated in the struct fsa_dev_info structure rather than returned.
 */
static int aac_probe_container_callback1(struct scsi_cmnd *scsicmd)
{
	scsicmd->device = NULL;
	return 0;
}

int aac_probe_container(struct aac_dev *dev, int cid)
{
	struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
	struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
	int status;

	if (!scsicmd || !scsidev) {
		kfree(scsicmd);
		kfree(scsidev);
		return -ENOMEM;
	}
	scsicmd->list.next = NULL;
	scsicmd->scsi_done = (void (*)(struct scsi_cmnd *))aac_probe_container_callback1;

	scsicmd->device = scsidev;
	scsidev->sdev_state = 0;
	scsidev->id = cid;
	scsidev->host = dev->scsi_host_ptr;

	if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
		while (scsicmd->device == scsidev)
			schedule();
	kfree(scsidev);
	status = scsicmd->SCp.Status;
	kfree(scsicmd);
	return status;
}

/* Local Structure to set SCSI inquiry data strings */
struct scsi_inq {
	char vid[8];	/* Vendor ID */
	char pid[16];	/* Product ID */
	char prl[4];	/* Product Revision Level */
};

/**
 *	inqstrcpy	-	string merge
 *	@a: string to copy from
 *	@b: string to copy to
 *
 *	Copy a string from one location to another
 *	without copying \0
 */

static void inqstrcpy(char *a, char *b)
{

	while (*a != (char)0)
		*b++ = *a++;
}
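
/*
 * Note that inqstrcpy() neither NUL-terminates nor pads the destination;
 * callers such as setinqstr() below pre-fill the target buffer with spaces.
 */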

static char *container_types[] = {
	"None",
	"Volume",
	"Mirror",
	"Stripe",
	"RAID5",
	"SSRW",
	"SSRO",
	"Morph",
	"Legacy",
	"RAID4",
	"RAID10",
	"RAID00",
	"V-MIRRORS",
	"PSEUDO R4",
	"RAID50",
	"RAID5D",
	"RAID5D0",
	"RAID1E",
	"RAID6",
	"RAID60",
	"Unknown"
};

char *get_container_type(unsigned tindex)
{
	if (tindex >= ARRAY_SIZE(container_types))
		tindex = ARRAY_SIZE(container_types) - 1;
	return container_types[tindex];
}

/* Function: setinqstr
 *
 *	Arguments: [1] pointer to aac_dev [2] pointer to void [3] int
 *
 *	Purpose: Sets SCSI inquiry data strings for vendor, product
 *	and revision level. Allows strings to be set in platform dependent
 *	files instead of in OS dependent driver source.
 */

static void setinqstr(struct aac_dev *dev, void *data, int tindex)
{
	struct scsi_inq *str;

	str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
	memset(str, ' ', sizeof(*str));

	if (dev->supplement_adapter_info.AdapterTypeText[0]) {
		char *cp = dev->supplement_adapter_info.AdapterTypeText;
		int c;
		if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
			inqstrcpy("SMC", str->vid);
		else {
			c = sizeof(str->vid);
			while (*cp && *cp != ' ' && --c)
				++cp;
			c = *cp;
			*cp = '\0';
			inqstrcpy(dev->supplement_adapter_info.AdapterTypeText,
				  str->vid);
			*cp = c;
			while (*cp && *cp != ' ')
				++cp;
		}
		while (*cp == ' ')
			++cp;
		/* last six chars reserved for vol type */
		c = 0;
		if (strlen(cp) > sizeof(str->pid)) {
			c = cp[sizeof(str->pid)];
			cp[sizeof(str->pid)] = '\0';
		}
		inqstrcpy(cp, str->pid);
		if (c)
			cp[sizeof(str->pid)] = c;
	} else {
		struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);

		inqstrcpy(mp->vname, str->vid);
		/* last six chars reserved for vol type */
		inqstrcpy(mp->model, str->pid);
	}

	if (tindex < ARRAY_SIZE(container_types)) {
		char *findit = str->pid;

		for ( ; *findit != ' '; findit++); /* walk till we find a space */
		/* RAID is superfluous in the context of a RAID device */
		if (memcmp(findit-4, "RAID", 4) == 0)
			*(findit -= 4) = ' ';
		if (((findit - str->pid) + strlen(container_types[tindex]))
		 < (sizeof(str->pid) + sizeof(str->prl)))
			inqstrcpy(container_types[tindex], findit + 1);
	}
	inqstrcpy("V1.0", str->prl);
}

static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
		struct aac_dev *dev, struct scsi_cmnd *scsicmd)
{
	int container;

	vpdpage83data->type3.codeset = 1;
	vpdpage83data->type3.identifiertype = 3;
	vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
						- 4;

	for (container = 0; container < dev->maximum_num_containers;
							container++) {

		if (scmd_id(scsicmd) == container) {
			memcpy(vpdpage83data->type3.Identifier,
			       dev->fsa_dev[container].identifier,
			       16);
			break;
		}
	}
}

static void get_container_serial_callback(void *context, struct fib *fibptr)
{
	struct aac_get_serial_resp *get_serial_reply;
	struct scsi_cmnd *scsicmd;

	BUG_ON(fibptr == NULL);

	scsicmd = (struct scsi_cmnd *) context;
	if (!aac_valid_context(scsicmd, fibptr))
		return;

	get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
	/* Failure is irrelevant, using default value instead */
	if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
		/* Check to see if it's for VPD 0x83 or 0x80 */
		if (scsicmd->cmnd[2] == 0x83) {
			/* vpd page 0x83 - Device Identification Page */
			struct aac_dev *dev;
			int i;
			struct tvpd_page83 vpdpage83data;

			dev = (struct aac_dev *)scsicmd->device->host->hostdata;

			memset(((u8 *)&vpdpage83data), 0,
			       sizeof(vpdpage83data));

			/* DIRECT_ACCESS_DEVICE */
			vpdpage83data.DeviceType = 0;
			/* DEVICE_CONNECTED */
			vpdpage83data.DeviceTypeQualifier = 0;
			/* VPD_DEVICE_IDENTIFIERS */
			vpdpage83data.PageCode = 0x83;
			vpdpage83data.reserved = 0;
			vpdpage83data.PageLength =
				sizeof(vpdpage83data.type1) +
				sizeof(vpdpage83data.type2);

			/* VPD 83 Type 3 is not supported for ARC */
			if (dev->sa_firmware)
				vpdpage83data.PageLength +=
					sizeof(vpdpage83data.type3);

			/* T10 Vendor Identifier Field Format */
			/* VpdcodesetAscii */
			vpdpage83data.type1.codeset = 2;
			/* VpdIdentifierTypeVendorId */
			vpdpage83data.type1.identifiertype = 1;
			vpdpage83data.type1.identifierlength =
				sizeof(vpdpage83data.type1) - 4;

			/* "ADAPTEC " for adaptec */
			memcpy(vpdpage83data.type1.venid,
				"ADAPTEC ",
				sizeof(vpdpage83data.type1.venid));
			memcpy(vpdpage83data.type1.productid,
				"ARRAY           ",
				sizeof(vpdpage83data.type1.productid));

			/* Convert to ASCII based serial number.
			 * The LSB is at the end.
			 */
			for (i = 0; i < 8; i++) {
				u8 temp =
					(u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
				if (temp > 0x9) {
					vpdpage83data.type1.serialnumber[i] =
						'A' + (temp - 0xA);
				} else {
					vpdpage83data.type1.serialnumber[i] =
						'0' + temp;
				}
			}

			/* VpdCodeSetBinary */
			vpdpage83data.type2.codeset = 1;
			/* VpdidentifiertypeEUI64 */
			vpdpage83data.type2.identifiertype = 2;
			vpdpage83data.type2.identifierlength =
				sizeof(vpdpage83data.type2) - 4;

			vpdpage83data.type2.eu64id.venid[0] = 0xD0;
			vpdpage83data.type2.eu64id.venid[1] = 0;
			vpdpage83data.type2.eu64id.venid[2] = 0;

			vpdpage83data.type2.eu64id.Serial =
				get_serial_reply->uid;
			vpdpage83data.type2.eu64id.reserved = 0;

			/*
			 * VpdIdentifierTypeFCPHName
			 * VPD 0x83 Type 3 not supported for ARC
			 */
			if (dev->sa_firmware) {
				build_vpd83_type3(&vpdpage83data,
						dev, scsicmd);
			}

			/* Move the inquiry data to the response buffer. */
			scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
						 sizeof(vpdpage83data));
		} else {
			/* It must be for VPD 0x80 */
			char sp[13];
			/* EVPD bit set */
			sp[0] = INQD_PDT_DA;
			sp[1] = scsicmd->cmnd[2];
			sp[2] = 0;
			sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
				le32_to_cpu(get_serial_reply->uid));
			scsi_sg_copy_from_buffer(scsicmd, sp,
						 sizeof(sp));
		}
	}

	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

	aac_fib_complete(fibptr);
	scsicmd->scsi_done(scsicmd);
}

/**
 *	aac_get_container_serial - get container serial, non-blocking.
 */
static int aac_get_container_serial(struct scsi_cmnd *scsicmd)
{
	int status;
	struct aac_get_serial *dinfo;
	struct fib *cmd_fibcontext;
	struct aac_dev *dev;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;

	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);

	aac_fib_init(cmd_fibcontext);
	dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);

	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));

	status = aac_fib_send(ContainerCommand,
		  cmd_fibcontext,
		  sizeof(struct aac_get_serial_resp),
		  FsaNormal,
		  0, 1,
		  (fib_callback) get_container_serial_callback,
		  (void *) scsicmd);

	/*
	 *	Check that the command was queued to the controller
	 */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
	aac_fib_complete(cmd_fibcontext);
	return -1;
}

/* Function: setinqserial
 *
 *	Arguments: [1] pointer to aac_dev [2] pointer to void [3] int
 *
 *	Purpose: Sets SCSI Unit Serial number.
 *		This is a fake. We should read a proper
 *		serial number from the container. <SuSE>But
 *		without docs it's quite hard to do it :-)
 *		So this will have to do in the meantime.</SuSE>
 */

static int setinqserial(struct aac_dev *dev, void *data, int cid)
{
	/*
	 *	This breaks array migration.
	 */
	return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
			le32_to_cpu(dev->adapter_info.serial[0]), cid);
}

static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
	u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
{
	u8 *sense_buf = (u8 *)sense_data;
	/* Sense data valid, err code 70h */
	sense_buf[0] = 0x70;	/* No info field */
	sense_buf[1] = 0;	/* Segment number, always zero */

	sense_buf[2] = sense_key;	/* Sense key */

	sense_buf[12] = sense_code;	/* Additional sense code */
	sense_buf[13] = a_sense_code;	/* Additional sense code qualifier */

	if (sense_key == ILLEGAL_REQUEST) {
		sense_buf[7] = 10;	/* Additional sense length */

		sense_buf[15] = bit_pointer;
		/* Illegal parameter is in the parameter block */
		if (sense_code == SENCODE_INVALID_CDB_FIELD)
			sense_buf[15] |= 0xc0;	/* Std sense key specific field */
		/* Illegal parameter is in the CDB block */
		sense_buf[16] = field_pointer >> 8;	/* MSB */
		sense_buf[17] = field_pointer;		/* LSB */
	} else
		sense_buf[7] = 6;	/* Additional sense length */
}
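
/*
 * For example, set_sense(&sense_data, ILLEGAL_REQUEST,
 * SENCODE_INVALID_CDB_FIELD, ASENCODE_INVALID_CDB_FIELD, 0, 7) builds
 * fixed-format (70h) sense data with ASC/ASCQ 24h/00h and sense-key
 * specific bytes pointing at CDB byte 7, with the SKSV and C/D bits set
 * by the 0xc0 above.
 */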

static int aac_bounds_32(struct aac_dev *dev, struct scsi_cmnd *cmd, u64 lba)
{
	if (lba & 0xffffffff00000000LL) {
		int cid = scmd_id(cmd);
		dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
		cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
		memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			     SCSI_SENSE_BUFFERSIZE));
		cmd->scsi_done(cmd);
		return 1;
	}
	return 0;
}

static int aac_bounds_64(struct aac_dev *dev, struct scsi_cmnd *cmd, u64 lba)
{
	return 0;
}

static void io_callback(void *context, struct fib *fibptr);

static int aac_read_raw_io(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count)
{
	struct aac_dev *dev = fib->dev;
	u16 fibsize, command;
	long ret;

	aac_fib_init(fib);
	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
		!dev->sync_mode) {
		struct aac_raw_io2 *readcmd2;
		readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
		memset(readcmd2, 0, sizeof(struct aac_raw_io2));
		readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
		readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		readcmd2->byteCount = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		readcmd2->cid = cpu_to_le16(scmd_id(cmd));
		readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
		ret = aac_build_sgraw2(cmd, readcmd2,
				dev->scsi_host_ptr->sg_tablesize);
		if (ret < 0)
			return ret;
		command = ContainerRawIo2;
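		/* The first SG element lives inside struct aac_raw_io2, so
		 * only sgeCnt - 1 additional entries extend the FIB payload. */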
		fibsize = sizeof(struct aac_raw_io2) +
			((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
	} else {
		struct aac_raw_io *readcmd;
		readcmd = (struct aac_raw_io *) fib_data(fib);
		readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
		readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		readcmd->count = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		readcmd->cid = cpu_to_le16(scmd_id(cmd));
		readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
		readcmd->bpTotal = 0;
		readcmd->bpComplete = 0;
		ret = aac_build_sgraw(cmd, &readcmd->sg);
		if (ret < 0)
			return ret;
		command = ContainerRawIo;
		fibsize = sizeof(struct aac_raw_io) +
			((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
	}

	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(command,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}

static int aac_read_block64(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count)
{
	u16 fibsize;
	struct aac_read64 *readcmd;
	long ret;

	aac_fib_init(fib);
	readcmd = (struct aac_read64 *) fib_data(fib);
	readcmd->command = cpu_to_le32(VM_CtHostRead64);
	readcmd->cid = cpu_to_le16(scmd_id(cmd));
	readcmd->sector_count = cpu_to_le16(count);
	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	readcmd->pad = 0;
	readcmd->flags = 0;

	ret = aac_build_sg64(cmd, &readcmd->sg);
	if (ret < 0)
		return ret;
	fibsize = sizeof(struct aac_read64) +
		((le32_to_cpu(readcmd->sg.count) - 1) *
		 sizeof (struct sgentry64));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand64,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}

static int aac_read_block(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count)
{
	u16 fibsize;
	struct aac_read *readcmd;
	struct aac_dev *dev = fib->dev;
	long ret;

	aac_fib_init(fib);
	readcmd = (struct aac_read *) fib_data(fib);
	readcmd->command = cpu_to_le32(VM_CtBlockRead);
	readcmd->cid = cpu_to_le32(scmd_id(cmd));
	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	readcmd->count = cpu_to_le32(count *
		dev->fsa_dev[scmd_id(cmd)].block_size);

	ret = aac_build_sg(cmd, &readcmd->sg);
	if (ret < 0)
		return ret;
	fibsize = sizeof(struct aac_read) +
		((le32_to_cpu(readcmd->sg.count) - 1) *
		 sizeof (struct sgentry));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}

static int aac_write_raw_io(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count, int fua)
{
	struct aac_dev *dev = fib->dev;
	u16 fibsize, command;
	long ret;

	aac_fib_init(fib);
	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
		!dev->sync_mode) {
		struct aac_raw_io2 *writecmd2;
		writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
		memset(writecmd2, 0, sizeof(struct aac_raw_io2));
		writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
		writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		writecmd2->byteCount = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		writecmd2->cid = cpu_to_le16(scmd_id(cmd));
		writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
		    (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
			cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
			cpu_to_le16(RIO2_IO_TYPE_WRITE);
		ret = aac_build_sgraw2(cmd, writecmd2,
				dev->scsi_host_ptr->sg_tablesize);
		if (ret < 0)
			return ret;
		command = ContainerRawIo2;
		fibsize = sizeof(struct aac_raw_io2) +
			((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
	} else {
		struct aac_raw_io *writecmd;
		writecmd = (struct aac_raw_io *) fib_data(fib);
		writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
		writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		writecmd->count = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		writecmd->cid = cpu_to_le16(scmd_id(cmd));
		writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
		    (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
			cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
			cpu_to_le16(RIO_TYPE_WRITE);
		writecmd->bpTotal = 0;
		writecmd->bpComplete = 0;
		ret = aac_build_sgraw(cmd, &writecmd->sg);
		if (ret < 0)
			return ret;
		command = ContainerRawIo;
		fibsize = sizeof(struct aac_raw_io) +
			((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
	}

	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(command,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}

static int aac_write_block64(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count, int fua)
{
	u16 fibsize;
	struct aac_write64 *writecmd;
	long ret;

	aac_fib_init(fib);
	writecmd = (struct aac_write64 *) fib_data(fib);
	writecmd->command = cpu_to_le32(VM_CtHostWrite64);
	writecmd->cid = cpu_to_le16(scmd_id(cmd));
	writecmd->sector_count = cpu_to_le16(count);
	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	writecmd->pad = 0;
	writecmd->flags = 0;

	ret = aac_build_sg64(cmd, &writecmd->sg);
	if (ret < 0)
		return ret;
	fibsize = sizeof(struct aac_write64) +
		((le32_to_cpu(writecmd->sg.count) - 1) *
		 sizeof (struct sgentry64));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand64,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}

static int aac_write_block(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count, int fua)
{
	u16 fibsize;
	struct aac_write *writecmd;
	struct aac_dev *dev = fib->dev;
	long ret;

	aac_fib_init(fib);
	writecmd = (struct aac_write *) fib_data(fib);
	writecmd->command = cpu_to_le32(VM_CtBlockWrite);
	writecmd->cid = cpu_to_le32(scmd_id(cmd));
	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	writecmd->count = cpu_to_le32(count *
		dev->fsa_dev[scmd_id(cmd)].block_size);
	writecmd->sg.count = cpu_to_le32(1);
	/* ->stable is not used - it used to mean which type of write */

	ret = aac_build_sg(cmd, &writecmd->sg);
	if (ret < 0)
		return ret;
	fibsize = sizeof(struct aac_write) +
		((le32_to_cpu(writecmd->sg.count) - 1) *
		 sizeof (struct sgentry));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}

static struct aac_srb *aac_scsi_common(struct fib *fib, struct scsi_cmnd *cmd)
{
	struct aac_srb *srbcmd;
	u32 flag;
	u32 timeout;

	aac_fib_init(fib);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		flag = SRB_DataOut;
		break;
	case DMA_BIDIRECTIONAL:
		flag = SRB_DataIn | SRB_DataOut;
		break;
	case DMA_FROM_DEVICE:
		flag = SRB_DataIn;
		break;
	case DMA_NONE:
	default:	/* shuts up some versions of gcc */
		flag = SRB_NoDataXfer;
		break;
	}

	srbcmd = (struct aac_srb *) fib_data(fib);
	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
	srbcmd->id = cpu_to_le32(scmd_id(cmd));
	srbcmd->lun = cpu_to_le32(cmd->device->lun);
	srbcmd->flags = cpu_to_le32(flag);
	timeout = cmd->request->timeout/HZ;
	if (timeout == 0)
		timeout = 1;
	srbcmd->timeout = cpu_to_le32(timeout);  // timeout in seconds
	srbcmd->retry_limit = 0; /* Obsolete parameter */
	srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
	return srbcmd;
}

static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
		struct scsi_cmnd *cmd)
{
	struct aac_hba_cmd_req *hbacmd;
	struct aac_dev *dev;
	int bus, target;
	u64 address;

	dev = (struct aac_dev *)cmd->device->host->hostdata;

	hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
	memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */
	/* iu_type is a parameter of aac_hba_send */
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		hbacmd->byte1 = 2;
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		hbacmd->byte1 = 1;
		break;
	case DMA_NONE:
	default:
		break;
	}
	hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);

	bus = aac_logical_to_phys(scmd_channel(cmd));
	target = scmd_id(cmd);
	hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;

	/* we fill in reply_qid later in aac_src_deliver_message */
	/* we fill in iu_type, request_id later in aac_hba_send */
	/* we fill in emb_data_desc_count later in aac_build_sghba */

	memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
	hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));

	address = (u64)fib->hw_error_pa;
	hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
	hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
	hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);

	return hbacmd;
}

static void aac_srb_callback(void *context, struct fib *fibptr);

static int aac_scsi_64(struct fib *fib, struct scsi_cmnd *cmd)
{
	u16 fibsize;
	struct aac_srb *srbcmd = aac_scsi_common(fib, cmd);
	long ret;

	ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
	if (ret < 0)
		return ret;
	srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
	/*
	 *	Build Scatter/Gather list
	 */
	fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
		((le32_to_cpu(srbcmd->sg.count) & 0xff) *
		 sizeof (struct sgentry64));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));

	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ScsiPortCommand64, fib,
				fibsize, FsaNormal, 0, 1,
				(fib_callback) aac_srb_callback,
				(void *) cmd);
}

static int aac_scsi_32(struct fib *fib, struct scsi_cmnd *cmd)
{
	u16 fibsize;
	struct aac_srb *srbcmd = aac_scsi_common(fib, cmd);
	long ret;

	ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
	if (ret < 0)
		return ret;
	srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
	/*
	 *	Build Scatter/Gather list
	 */
	fibsize = sizeof (struct aac_srb) +
		(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
		 sizeof (struct sgentry));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));

	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
				(fib_callback) aac_srb_callback, (void *) cmd);
}

static int aac_scsi_32_64(struct fib *fib, struct scsi_cmnd *cmd)
{
	if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
	    (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
		return FAILED;
	return aac_scsi_32(fib, cmd);
}

static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
{
	struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
	struct aac_dev *dev;
	long ret;

	dev = (struct aac_dev *)cmd->device->host->hostdata;

	ret = aac_build_sghba(cmd, hbacmd,
		dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
	if (ret < 0)
		return ret;

	/*
	 *	Now send the HBA command to the adapter
	 */
	fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
		sizeof(struct aac_hba_sgl);

	return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
			  (fib_callback) aac_hba_callback,
			  (void *) cmd);
}

int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
{
	struct fib *fibptr;
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	struct aac_ciss_identify_pd *identify_resp;
	dma_addr_t addr;
	u32 vbus, vid;
	u16 fibsize, datasize;
	int rcode = -ENOMEM;


	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	fibsize = sizeof(struct aac_srb) -
		sizeof(struct sgentry) + sizeof(struct sgentry64);
	datasize = sizeof(struct aac_ciss_identify_pd);

	identify_resp = pci_alloc_consistent(dev->pdev, datasize, &addr);

	if (!identify_resp)
		goto fib_free_ptr;

	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);

	aac_fib_init(fibptr);

	srbcmd = (struct aac_srb *) fib_data(fibptr);
	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(vbus);
	srbcmd->id = cpu_to_le32(vid);
	srbcmd->lun = 0;
	srbcmd->flags = cpu_to_le32(SRB_DataIn);
	srbcmd->timeout = cpu_to_le32(10);
	srbcmd->retry_limit = 0;
	srbcmd->cdb_size = cpu_to_le32(12);
	srbcmd->count = cpu_to_le32(datasize);

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	srbcmd->cdb[0] = 0x26;
	srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
	srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;

	sg64 = (struct sgmap64 *)&srbcmd->sg;
	sg64->count = cpu_to_le32(1);
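	/* Split the 64-bit DMA address with two 16-bit shifts so the
	 * expression stays well-defined even if dma_addr_t is 32 bits. */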
	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
	sg64->sg[0].count = cpu_to_le32(datasize);

	rcode = aac_fib_send(ScsiPortCommand64,
		fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);

	if (identify_resp->current_queue_depth_limit <= 0 ||
		identify_resp->current_queue_depth_limit > 32)
		dev->hba_map[bus][target].qd_limit = 32;
	else
		dev->hba_map[bus][target].qd_limit =
			identify_resp->current_queue_depth_limit;

	pci_free_consistent(dev->pdev, datasize, (void *)identify_resp, addr);

	aac_fib_complete(fibptr);

fib_free_ptr:
	aac_fib_free(fibptr);
out:
	return rcode;
}

/**
 *	aac_update_hba_map()	-	update current hba map with data from FW
 *	@dev:		aac_dev structure
 *	@phys_luns:	FW information from report phys luns
 *	@rescan:	pass AAC_INIT on initialization, else a rescan pass
 *
 *	Update our hba map with the information gathered from the FW
 */
void aac_update_hba_map(struct aac_dev *dev,
		struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
{
	/* ok and extended reporting */
	u32 lun_count, nexus;
	u32 i, bus, target;
	u8 expose_flag, attribs;
	u8 devtype;

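	/* list_length is a big-endian byte count and each extended-report
	 * LUN entry is 24 bytes, so the division yields the entry count. */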
	lun_count = ((phys_luns->list_length[0] << 24)
			+ (phys_luns->list_length[1] << 16)
			+ (phys_luns->list_length[2] << 8)
			+ (phys_luns->list_length[3])) / 24;

	for (i = 0; i < lun_count; ++i) {

		bus = phys_luns->lun[i].level2[1] & 0x3f;
		target = phys_luns->lun[i].level2[0];
		expose_flag = phys_luns->lun[i].bus >> 6;
		attribs = phys_luns->lun[i].node_ident[9];
		nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);

		if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
			continue;

		dev->hba_map[bus][target].expose = expose_flag;

		if (expose_flag != 0) {
			devtype = AAC_DEVTYPE_RAID_MEMBER;
			goto update_devtype;
		}

		if (nexus != 0 && (attribs & 8)) {
			devtype = AAC_DEVTYPE_NATIVE_RAW;
			dev->hba_map[bus][target].rmw_nexus =
					nexus;
		} else
			devtype = AAC_DEVTYPE_ARC_RAW;

		if (devtype != AAC_DEVTYPE_NATIVE_RAW)
			goto update_devtype;

		if (aac_issue_bmic_identify(dev, bus, target) < 0)
			dev->hba_map[bus][target].qd_limit = 32;

update_devtype:
		if (rescan == AAC_INIT)
			dev->hba_map[bus][target].devtype = devtype;
		else
			dev->hba_map[bus][target].new_devtype = devtype;
	}
}
1784
1785 /**
1786 * aac_report_phys_luns() - Process topology change
1787 * @dev:	aac_dev structure
1788 * @fibptr:	fib pointer
1789 * @rescan:	passed through to aac_update_hba_map() (AAC_INIT on first scan)
 *
1790 * Execute a CISS REPORT PHYS LUNS and process the results into
1791 * the current hba_map.
1792 */
1793 int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
1794 {
1795 int fibsize, datasize;
1796 struct aac_ciss_phys_luns_resp *phys_luns;
1797 struct aac_srb *srbcmd;
1798 struct sgmap64 *sg64;
1799 dma_addr_t addr;
1800 u32 vbus, vid;
1801 int rcode = 0;
1802
1803 /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
1804 fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
1805 + sizeof(struct sgentry64);
1806 datasize = sizeof(struct aac_ciss_phys_luns_resp)
1807 + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
1808
1809 phys_luns = (struct aac_ciss_phys_luns_resp *) pci_alloc_consistent(
1810 dev->pdev, datasize, &addr);
1811
1812 if (phys_luns == NULL) {
1813 rcode = -ENOMEM;
1814 goto err_out;
1815 }
1816
1817 vbus = (u32) le16_to_cpu(
1818 dev->supplement_adapter_info.VirtDeviceBus);
1819 vid = (u32) le16_to_cpu(
1820 dev->supplement_adapter_info.VirtDeviceTarget);
1821
1822 aac_fib_init(fibptr);
1823
1824 srbcmd = (struct aac_srb *) fib_data(fibptr);
1825 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1826 srbcmd->channel = cpu_to_le32(vbus);
1827 srbcmd->id = cpu_to_le32(vid);
1828 srbcmd->lun = 0;
1829 srbcmd->flags = cpu_to_le32(SRB_DataIn);
1830 srbcmd->timeout = cpu_to_le32(10);
1831 srbcmd->retry_limit = 0;
1832 srbcmd->cdb_size = cpu_to_le32(12);
1833 srbcmd->count = cpu_to_le32(datasize);
1834
1835 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1836 srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
1837 srbcmd->cdb[1] = 2; /* extended reporting */
1838 srbcmd->cdb[8] = (u8)(datasize >> 8);
1839 srbcmd->cdb[9] = (u8)(datasize);
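	/* the allocation length is big-endian; datasize fits in 16 bits,
	 * so only cdb[8] (MSB) and cdb[9] (LSB) need to be set */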
1840
1841 sg64 = (struct sgmap64 *) &srbcmd->sg;
1842 sg64->count = cpu_to_le32(1);
1843 sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
1844 sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
1845 sg64->sg[0].count = cpu_to_le32(datasize);
1846
1847 rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
1848 FsaNormal, 1, 1, NULL, NULL);
1849
1850 /* analyse data */
1851 if (rcode >= 0 && phys_luns->resp_flag == 2) {
1852 /* ok and extended reporting */
1853 aac_update_hba_map(dev, phys_luns, rescan);
1854 }
1855
1856 pci_free_consistent(dev->pdev, datasize, (void *) phys_luns, addr);
1857 err_out:
1858 return rcode;
1859 }
1860
1861 int aac_get_adapter_info(struct aac_dev* dev)
1862 {
1863 struct fib* fibptr;
1864 int rcode;
1865 u32 tmp, bus, target;
1866 struct aac_adapter_info *info;
1867 struct aac_bus_info *command;
1868 struct aac_bus_info_response *bus_info;
1869
1870 if (!(fibptr = aac_fib_alloc(dev)))
1871 return -ENOMEM;
1872
1873 aac_fib_init(fibptr);
1874 info = (struct aac_adapter_info *) fib_data(fibptr);
1875 memset(info,0,sizeof(*info));
1876
1877 rcode = aac_fib_send(RequestAdapterInfo,
1878 fibptr,
1879 sizeof(*info),
1880 FsaNormal,
1881 -1, 1, /* First `interrupt' command uses special wait */
1882 NULL,
1883 NULL);
1884
1885 if (rcode < 0) {
1886 /* FIB should be freed only after
1887 * getting the response from the F/W */
1888 if (rcode != -ERESTARTSYS) {
1889 aac_fib_complete(fibptr);
1890 aac_fib_free(fibptr);
1891 }
1892 return rcode;
1893 }
1894 memcpy(&dev->adapter_info, info, sizeof(*info));
1895
1896 dev->supplement_adapter_info.VirtDeviceBus = 0xffff;
1897 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
1898 struct aac_supplement_adapter_info * sinfo;
1899
1900 aac_fib_init(fibptr);
1901
1902 sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
1903
1904 memset(sinfo,0,sizeof(*sinfo));
1905
1906 rcode = aac_fib_send(RequestSupplementAdapterInfo,
1907 fibptr,
1908 sizeof(*sinfo),
1909 FsaNormal,
1910 1, 1,
1911 NULL,
1912 NULL);
1913
1914 if (rcode >= 0)
1915 memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
1916 if (rcode == -ERESTARTSYS) {
1917 fibptr = aac_fib_alloc(dev);
1918 if (!fibptr)
1919 return -ENOMEM;
1920 }
1921
1922 }
1923
1924 /* reset all previous mapped devices (i.e. for init. after IOP_RESET) */
1925 for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
1926 for (target = 0; target < AAC_MAX_TARGETS; target++) {
1927 dev->hba_map[bus][target].devtype = 0;
1928 dev->hba_map[bus][target].qd_limit = 0;
1929 }
1930 }
1931
1932 /*
1933 * GetBusInfo
1934 */
1935
1936 aac_fib_init(fibptr);
1937
1938 bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
1939
1940 memset(bus_info, 0, sizeof(*bus_info));
1941
1942 command = (struct aac_bus_info *)bus_info;
1943
1944 command->Command = cpu_to_le32(VM_Ioctl);
1945 command->ObjType = cpu_to_le32(FT_DRIVE);
1946 command->MethodId = cpu_to_le32(1);
1947 command->CtlCmd = cpu_to_le32(GetBusInfo);
1948
1949 rcode = aac_fib_send(ContainerCommand,
1950 fibptr,
1951 sizeof (*bus_info),
1952 FsaNormal,
1953 1, 1,
1954 NULL, NULL);
1955
1956 /* reasoned default */
1957 dev->maximum_num_physicals = 16;
1958 if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
1959 dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
1960 dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
1961 }
1962
1963 if (!dev->sync_mode && dev->sa_firmware &&
1964 dev->supplement_adapter_info.VirtDeviceBus != 0xffff) {
1965 /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
1966 rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
1967 }
1968
1969 if (!dev->in_reset) {
1970 char buffer[16];
1971 tmp = le32_to_cpu(dev->adapter_info.kernelrev);
1972 printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
1973 dev->name,
1974 dev->id,
1975 tmp>>24,
1976 (tmp>>16)&0xff,
1977 tmp&0xff,
1978 le32_to_cpu(dev->adapter_info.kernelbuild),
1979 (int)sizeof(dev->supplement_adapter_info.BuildDate),
1980 dev->supplement_adapter_info.BuildDate);
1981 tmp = le32_to_cpu(dev->adapter_info.monitorrev);
1982 printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
1983 dev->name, dev->id,
1984 tmp>>24,(tmp>>16)&0xff,tmp&0xff,
1985 le32_to_cpu(dev->adapter_info.monitorbuild));
1986 tmp = le32_to_cpu(dev->adapter_info.biosrev);
1987 printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
1988 dev->name, dev->id,
1989 tmp>>24,(tmp>>16)&0xff,tmp&0xff,
1990 le32_to_cpu(dev->adapter_info.biosbuild));
1991 buffer[0] = '\0';
1992 if (aac_get_serial_number(
1993 shost_to_class(dev->scsi_host_ptr), buffer))
1994 printk(KERN_INFO "%s%d: serial %s",
1995 dev->name, dev->id, buffer);
1996 if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
1997 printk(KERN_INFO "%s%d: TSID %.*s\n",
1998 dev->name, dev->id,
1999 (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
2000 dev->supplement_adapter_info.VpdInfo.Tsid);
2001 }
2002 if (!aac_check_reset || ((aac_check_reset == 1) &&
2003 (dev->supplement_adapter_info.SupportedOptions2 &
2004 AAC_OPTION_IGNORE_RESET))) {
2005 printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
2006 dev->name, dev->id);
2007 }
2008 }
2009
2010 dev->cache_protected = 0;
2011 dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
2012 AAC_FEATURE_JBOD) != 0);
2013 dev->nondasd_support = 0;
2014 dev->raid_scsi_mode = 0;
2015 if(dev->adapter_info.options & AAC_OPT_NONDASD)
2016 dev->nondasd_support = 1;
2017
2018 /*
2019 * If the firmware supports ROMB RAID/SCSI mode and we are currently
2020 * in RAID/SCSI mode, set the flag. For now if in this mode we will
2021 * force nondasd support on. If we decide to allow the non-dasd flag
2022 * additional changes will have to be made to support
2023 * RAID/SCSI. The function aac_scsi_cmd in this module will have to be
2024 * changed to support the new dev->raid_scsi_mode flag instead of
2025 * leeching off of the dev->nondasd_support flag. Also in linit.c the
2026 * function aac_detect will have to be modified where it sets up the
2027 * max number of channels based on the aac->nondasd_support flag only.
2028 */
2029 if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
2030 (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
2031 dev->nondasd_support = 1;
2032 dev->raid_scsi_mode = 1;
2033 }
2034 if (dev->raid_scsi_mode != 0)
2035 printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
2036 dev->name, dev->id);
2037
2038 if (nondasd != -1)
2039 dev->nondasd_support = (nondasd!=0);
2040 if (dev->nondasd_support && !dev->in_reset)
2041 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
2042
2043 if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
2044 dev->needs_dac = 1;
2045 dev->dac_support = 0;
2046 if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
2047 (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
2048 if (!dev->in_reset)
2049 printk(KERN_INFO "%s%d: 64bit support enabled.\n",
2050 dev->name, dev->id);
2051 dev->dac_support = 1;
2052 }
2053
2054 if(dacmode != -1) {
2055 dev->dac_support = (dacmode!=0);
2056 }
2057
2058 /* avoid problems with AAC_QUIRK_SCSI_32 controllers */
2059 if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
2060 & AAC_QUIRK_SCSI_32)) {
2061 dev->nondasd_support = 0;
2062 dev->jbod = 0;
2063 expose_physicals = 0;
2064 }
2065
2066 if(dev->dac_support != 0) {
2067 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
2068 !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
2069 if (!dev->in_reset)
2070 printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
2071 dev->name, dev->id);
2072 } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32)) &&
2073 !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
2074 printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
2075 dev->name, dev->id);
2076 dev->dac_support = 0;
2077 } else {
2078 printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
2079 dev->name, dev->id);
2080 rcode = -ENOMEM;
2081 }
2082 }
2083 /*
2084 * Deal with configuring for the individualized limits of each packet
2085 * interface.
2086 */
2087 dev->a_ops.adapter_scsi = (dev->dac_support)
2088 ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
2089 ? aac_scsi_32_64
2090 : aac_scsi_64)
2091 : aac_scsi_32;
2092 if (dev->raw_io_interface) {
2093 dev->a_ops.adapter_bounds = (dev->raw_io_64)
2094 ? aac_bounds_64
2095 : aac_bounds_32;
2096 dev->a_ops.adapter_read = aac_read_raw_io;
2097 dev->a_ops.adapter_write = aac_write_raw_io;
2098 } else {
2099 dev->a_ops.adapter_bounds = aac_bounds_32;
2100 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
2101 sizeof(struct aac_fibhdr) -
2102 sizeof(struct aac_write) + sizeof(struct sgentry)) /
2103 sizeof(struct sgentry);
2104 if (dev->dac_support) {
2105 dev->a_ops.adapter_read = aac_read_block64;
2106 dev->a_ops.adapter_write = aac_write_block64;
2107 /*
2108 * 38 scatter gather elements
2109 */
2110 dev->scsi_host_ptr->sg_tablesize =
2111 (dev->max_fib_size -
2112 sizeof(struct aac_fibhdr) -
2113 sizeof(struct aac_write64) +
2114 sizeof(struct sgentry64)) /
2115 sizeof(struct sgentry64);
2116 } else {
2117 dev->a_ops.adapter_read = aac_read_block;
2118 dev->a_ops.adapter_write = aac_write_block;
2119 }
2120 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
2121 if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
2122 /*
2123 * Worst case size that could cause sg overflow when
2124 * we break up SG elements that are larger than 64KB.
2125 * Would be nice if we could tell the SCSI layer what
2126 * the maximum SG element size can be. Worst case is
2127 * (sg_tablesize-1) 4KB elements with one 64KB
2128 * element.
2129 * 32bit -> 468 or 238KB 64bit -> 424 or 212KB
2130 */
2131 dev->scsi_host_ptr->max_sectors =
2132 (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
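			/*
			 * For instance, a sg_tablesize of 34 would cap
			 * max_sectors at 34 * 8 + 112 = 384 512-byte
			 * sectors (192KB).
			 */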
2133 }
2134 }
2135 if (!dev->sync_mode && dev->sa_firmware &&
2136 dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
2137 dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
2138 HBA_MAX_SG_SEPARATE;
2139
2140 /* FIB should be freed only after getting the response from the F/W */
2141 if (rcode != -ERESTARTSYS) {
2142 aac_fib_complete(fibptr);
2143 aac_fib_free(fibptr);
2144 }
2145
2146 return rcode;
2147 }
2148
2149
2150 static void io_callback(void *context, struct fib * fibptr)
2151 {
2152 struct aac_dev *dev;
2153 struct aac_read_reply *readreply;
2154 struct scsi_cmnd *scsicmd;
2155 u32 cid;
2156
2157 scsicmd = (struct scsi_cmnd *) context;
2158
2159 if (!aac_valid_context(scsicmd, fibptr))
2160 return;
2161
2162 dev = fibptr->dev;
2163 cid = scmd_id(scsicmd);
2164
2165 if (nblank(dprintk(x))) {
2166 u64 lba;
2167 switch (scsicmd->cmnd[0]) {
2168 case WRITE_6:
2169 case READ_6:
2170 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2171 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2172 break;
2173 case WRITE_16:
2174 case READ_16:
2175 lba = ((u64)scsicmd->cmnd[2] << 56) |
2176 ((u64)scsicmd->cmnd[3] << 48) |
2177 ((u64)scsicmd->cmnd[4] << 40) |
2178 ((u64)scsicmd->cmnd[5] << 32) |
2179 ((u64)scsicmd->cmnd[6] << 24) |
2180 (scsicmd->cmnd[7] << 16) |
2181 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2182 break;
2183 case WRITE_12:
2184 case READ_12:
2185 lba = ((u64)scsicmd->cmnd[2] << 24) |
2186 (scsicmd->cmnd[3] << 16) |
2187 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2188 break;
2189 default:
2190 lba = ((u64)scsicmd->cmnd[2] << 24) |
2191 (scsicmd->cmnd[3] << 16) |
2192 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2193 break;
2194 }
2195 printk(KERN_DEBUG
2196 "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
2197 smp_processor_id(), (unsigned long long)lba, jiffies);
2198 }
2199
2200 BUG_ON(fibptr == NULL);
2201
2202 scsi_dma_unmap(scsicmd);
2203
2204 readreply = (struct aac_read_reply *)fib_data(fibptr);
2205 switch (le32_to_cpu(readreply->status)) {
2206 case ST_OK:
2207 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2208 SAM_STAT_GOOD;
2209 dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
2210 break;
2211 case ST_NOT_READY:
2212 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2213 SAM_STAT_CHECK_CONDITION;
2214 set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
2215 SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
2216 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2217 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2218 SCSI_SENSE_BUFFERSIZE));
2219 break;
2220 case ST_MEDERR:
2221 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2222 SAM_STAT_CHECK_CONDITION;
2223 set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
2224 SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
2225 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2226 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2227 SCSI_SENSE_BUFFERSIZE));
2228 break;
2229 default:
2230 #ifdef AAC_DETAILED_STATUS_INFO
2231 printk(KERN_WARNING "io_callback: io failed, status = %d\n",
2232 le32_to_cpu(readreply->status));
2233 #endif
2234 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2235 SAM_STAT_CHECK_CONDITION;
2236 set_sense(&dev->fsa_dev[cid].sense_data,
2237 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2238 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2239 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2240 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2241 SCSI_SENSE_BUFFERSIZE));
2242 break;
2243 }
2244 aac_fib_complete(fibptr);
2245
2246 scsicmd->scsi_done(scsicmd);
2247 }
2248
2249 static int aac_read(struct scsi_cmnd * scsicmd)
2250 {
2251 u64 lba;
2252 u32 count;
2253 int status;
2254 struct aac_dev *dev;
2255 struct fib * cmd_fibcontext;
2256 int cid;
2257
2258 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2259 /*
2260 * Get block address and transfer length
2261 */
2262 switch (scsicmd->cmnd[0]) {
2263 case READ_6:
2264 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
2265
2266 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
2267 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2268 count = scsicmd->cmnd[4];
2269
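		/* READ(6) carries a one-byte transfer length where a
		 * value of zero means 256 blocks */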
2270 if (count == 0)
2271 count = 256;
2272 break;
2273 case READ_16:
2274 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
2275
2276 lba = ((u64)scsicmd->cmnd[2] << 56) |
2277 ((u64)scsicmd->cmnd[3] << 48) |
2278 ((u64)scsicmd->cmnd[4] << 40) |
2279 ((u64)scsicmd->cmnd[5] << 32) |
2280 ((u64)scsicmd->cmnd[6] << 24) |
2281 (scsicmd->cmnd[7] << 16) |
2282 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2283 count = (scsicmd->cmnd[10] << 24) |
2284 (scsicmd->cmnd[11] << 16) |
2285 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
2286 break;
2287 case READ_12:
2288 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
2289
2290 lba = ((u64)scsicmd->cmnd[2] << 24) |
2291 (scsicmd->cmnd[3] << 16) |
2292 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2293 count = (scsicmd->cmnd[6] << 24) |
2294 (scsicmd->cmnd[7] << 16) |
2295 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2296 break;
2297 default:
2298 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
2299
2300 lba = ((u64)scsicmd->cmnd[2] << 24) |
2301 (scsicmd->cmnd[3] << 16) |
2302 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2303 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2304 break;
2305 }
2306
2307 if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2308 cid = scmd_id(scsicmd);
2309 dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2310 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2311 SAM_STAT_CHECK_CONDITION;
2312 set_sense(&dev->fsa_dev[cid].sense_data,
2313 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2314 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2315 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2316 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2317 SCSI_SENSE_BUFFERSIZE));
2318 scsicmd->scsi_done(scsicmd);
2319 return 1;
2320 }
2321
2322 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
2323 smp_processor_id(), (unsigned long long)lba, jiffies));
2324 if (aac_adapter_bounds(dev,scsicmd,lba))
2325 return 0;
2326 /*
2327 	 * Allocate and initialize a Fib
2328 */
2329 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2330
2331 status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
2332
2333 /*
2334 	 * Check that the command was queued to the controller
2335 */
2336 if (status == -EINPROGRESS) {
2337 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2338 return 0;
2339 }
2340
2341 printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
2342 /*
2343 * For some reason, the Fib didn't queue, return QUEUE_FULL
2344 */
2345 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2346 scsicmd->scsi_done(scsicmd);
2347 aac_fib_complete(cmd_fibcontext);
2348 aac_fib_free(cmd_fibcontext);
2349 return 0;
2350 }
2351
2352 static int aac_write(struct scsi_cmnd * scsicmd)
2353 {
2354 u64 lba;
2355 u32 count;
2356 int fua;
2357 int status;
2358 struct aac_dev *dev;
2359 struct fib * cmd_fibcontext;
2360 int cid;
2361
2362 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2363 /*
2364 * Get block address and transfer length
2365 */
2366 if (scsicmd->cmnd[0] == WRITE_6) /* 6 byte command */
2367 {
2368 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
2369 count = scsicmd->cmnd[4];
2370 if (count == 0)
2371 count = 256;
2372 fua = 0;
2373 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
2374 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
2375
2376 lba = ((u64)scsicmd->cmnd[2] << 56) |
2377 ((u64)scsicmd->cmnd[3] << 48) |
2378 ((u64)scsicmd->cmnd[4] << 40) |
2379 ((u64)scsicmd->cmnd[5] << 32) |
2380 ((u64)scsicmd->cmnd[6] << 24) |
2381 (scsicmd->cmnd[7] << 16) |
2382 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2383 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
2384 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
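		/* FUA (force unit access) is bit 3 of CDB byte 1 in
		 * WRITE(10), WRITE(12) and WRITE(16) */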
2385 fua = scsicmd->cmnd[1] & 0x8;
2386 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
2387 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
2388
2389 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
2390 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2391 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
2392 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
2393 fua = scsicmd->cmnd[1] & 0x8;
2394 } else {
2395 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
2396 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2397 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2398 fua = scsicmd->cmnd[1] & 0x8;
2399 }
2400
2401 if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
2402 cid = scmd_id(scsicmd);
2403 dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
2404 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2405 SAM_STAT_CHECK_CONDITION;
2406 set_sense(&dev->fsa_dev[cid].sense_data,
2407 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2408 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2409 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2410 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2411 SCSI_SENSE_BUFFERSIZE));
2412 scsicmd->scsi_done(scsicmd);
2413 return 1;
2414 }
2415
2416 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
2417 smp_processor_id(), (unsigned long long)lba, jiffies));
2418 if (aac_adapter_bounds(dev,scsicmd,lba))
2419 return 0;
2420 /*
2421 * Allocate and initialize a Fib then setup a BlockWrite command
2422 */
2423 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
2424
2425 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
2426
2427 /*
2428 	 * Check that the command was queued to the controller
2429 */
2430 if (status == -EINPROGRESS) {
2431 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2432 return 0;
2433 }
2434
2435 printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
2436 /*
2437 * For some reason, the Fib didn't queue, return QUEUE_FULL
2438 */
2439 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
2440 scsicmd->scsi_done(scsicmd);
2441
2442 aac_fib_complete(cmd_fibcontext);
2443 aac_fib_free(cmd_fibcontext);
2444 return 0;
2445 }
2446
2447 static void synchronize_callback(void *context, struct fib *fibptr)
2448 {
2449 struct aac_synchronize_reply *synchronizereply;
2450 struct scsi_cmnd *cmd;
2451
2452 cmd = context;
2453
2454 if (!aac_valid_context(cmd, fibptr))
2455 return;
2456
2457 dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
2458 smp_processor_id(), jiffies));
2459 BUG_ON(fibptr == NULL);
2460
2461
2462 synchronizereply = fib_data(fibptr);
2463 if (le32_to_cpu(synchronizereply->status) == CT_OK)
2464 cmd->result = DID_OK << 16 |
2465 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2466 else {
2467 struct scsi_device *sdev = cmd->device;
2468 struct aac_dev *dev = fibptr->dev;
2469 u32 cid = sdev_id(sdev);
2470 printk(KERN_WARNING
2471 "synchronize_callback: synchronize failed, status = %d\n",
2472 le32_to_cpu(synchronizereply->status));
2473 cmd->result = DID_OK << 16 |
2474 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2475 set_sense(&dev->fsa_dev[cid].sense_data,
2476 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
2477 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
2478 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2479 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2480 SCSI_SENSE_BUFFERSIZE));
2481 }
2482
2483 aac_fib_complete(fibptr);
2484 aac_fib_free(fibptr);
2485 cmd->scsi_done(cmd);
2486 }
2487
2488 static int aac_synchronize(struct scsi_cmnd *scsicmd)
2489 {
2490 int status;
2491 struct fib *cmd_fibcontext;
2492 struct aac_synchronize *synchronizecmd;
2493 struct scsi_cmnd *cmd;
2494 struct scsi_device *sdev = scsicmd->device;
2495 int active = 0;
2496 struct aac_dev *aac;
2497 u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
2498 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
2499 u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
2500 unsigned long flags;
2501
2502 /*
2503 * Wait for all outstanding queued commands to complete to this
2504 * specific target (block).
2505 */
2506 spin_lock_irqsave(&sdev->list_lock, flags);
2507 list_for_each_entry(cmd, &sdev->cmd_list, list)
2508 if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
2509 u64 cmnd_lba;
2510 u32 cmnd_count;
2511
2512 if (cmd->cmnd[0] == WRITE_6) {
2513 cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
2514 (cmd->cmnd[2] << 8) |
2515 cmd->cmnd[3];
2516 cmnd_count = cmd->cmnd[4];
2517 if (cmnd_count == 0)
2518 cmnd_count = 256;
2519 } else if (cmd->cmnd[0] == WRITE_16) {
2520 cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
2521 ((u64)cmd->cmnd[3] << 48) |
2522 ((u64)cmd->cmnd[4] << 40) |
2523 ((u64)cmd->cmnd[5] << 32) |
2524 ((u64)cmd->cmnd[6] << 24) |
2525 (cmd->cmnd[7] << 16) |
2526 (cmd->cmnd[8] << 8) |
2527 cmd->cmnd[9];
2528 cmnd_count = (cmd->cmnd[10] << 24) |
2529 (cmd->cmnd[11] << 16) |
2530 (cmd->cmnd[12] << 8) |
2531 cmd->cmnd[13];
2532 } else if (cmd->cmnd[0] == WRITE_12) {
2533 cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
2534 (cmd->cmnd[3] << 16) |
2535 (cmd->cmnd[4] << 8) |
2536 cmd->cmnd[5];
2537 cmnd_count = (cmd->cmnd[6] << 24) |
2538 (cmd->cmnd[7] << 16) |
2539 (cmd->cmnd[8] << 8) |
2540 cmd->cmnd[9];
2541 } else if (cmd->cmnd[0] == WRITE_10) {
2542 cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
2543 (cmd->cmnd[3] << 16) |
2544 (cmd->cmnd[4] << 8) |
2545 cmd->cmnd[5];
2546 cmnd_count = (cmd->cmnd[7] << 8) |
2547 cmd->cmnd[8];
2548 } else
2549 continue;
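			/*
			 * Skip writes that cannot intersect the range
			 * being flushed. A SYNCHRONIZE CACHE count of 0
			 * means "from lba to the end of the unit", so the
			 * upper bound is only checked when count is
			 * non-zero.
			 */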
2550 if (((cmnd_lba + cmnd_count) < lba) ||
2551 (count && ((lba + count) < cmnd_lba)))
2552 continue;
2553 ++active;
2554 break;
2555 }
2556
2557 spin_unlock_irqrestore(&sdev->list_lock, flags);
2558
2559 /*
2560 * Yield the processor (requeue for later)
2561 */
2562 if (active)
2563 return SCSI_MLQUEUE_DEVICE_BUSY;
2564
2565 aac = (struct aac_dev *)sdev->host->hostdata;
2566 if (aac->in_reset)
2567 return SCSI_MLQUEUE_HOST_BUSY;
2568
2569 /*
2570 * Allocate and initialize a Fib
2571 */
2572 if (!(cmd_fibcontext = aac_fib_alloc(aac)))
2573 return SCSI_MLQUEUE_HOST_BUSY;
2574
2575 aac_fib_init(cmd_fibcontext);
2576
2577 synchronizecmd = fib_data(cmd_fibcontext);
2578 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
2579 synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
2580 synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
2581 synchronizecmd->count =
2582 cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
2583
2584 /*
2585 * Now send the Fib to the adapter
2586 */
2587 status = aac_fib_send(ContainerCommand,
2588 cmd_fibcontext,
2589 sizeof(struct aac_synchronize),
2590 FsaNormal,
2591 0, 1,
2592 (fib_callback)synchronize_callback,
2593 (void *)scsicmd);
2594
2595 /*
2596 	 * Check that the command was queued to the controller
2597 */
2598 if (status == -EINPROGRESS) {
2599 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2600 return 0;
2601 }
2602
2603 printk(KERN_WARNING
2604 "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
2605 aac_fib_complete(cmd_fibcontext);
2606 aac_fib_free(cmd_fibcontext);
2607 return SCSI_MLQUEUE_HOST_BUSY;
2608 }
2609
2610 static void aac_start_stop_callback(void *context, struct fib *fibptr)
2611 {
2612 struct scsi_cmnd *scsicmd = context;
2613
2614 if (!aac_valid_context(scsicmd, fibptr))
2615 return;
2616
2617 BUG_ON(fibptr == NULL);
2618
2619 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2620
2621 aac_fib_complete(fibptr);
2622 aac_fib_free(fibptr);
2623 scsicmd->scsi_done(scsicmd);
2624 }
2625
2626 static int aac_start_stop(struct scsi_cmnd *scsicmd)
2627 {
2628 int status;
2629 struct fib *cmd_fibcontext;
2630 struct aac_power_management *pmcmd;
2631 struct scsi_device *sdev = scsicmd->device;
2632 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
2633
2634 if (!(aac->supplement_adapter_info.SupportedOptions2 &
2635 AAC_OPTION_POWER_MANAGEMENT)) {
2636 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2637 SAM_STAT_GOOD;
2638 scsicmd->scsi_done(scsicmd);
2639 return 0;
2640 }
2641
2642 if (aac->in_reset)
2643 return SCSI_MLQUEUE_HOST_BUSY;
2644
2645 /*
2646 * Allocate and initialize a Fib
2647 */
2648 cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
2649
2650 aac_fib_init(cmd_fibcontext);
2651
2652 pmcmd = fib_data(cmd_fibcontext);
2653 pmcmd->command = cpu_to_le32(VM_ContainerConfig);
2654 pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
2655 /* Eject bit ignored, not relevant */
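	/* CDB byte 4 bit 0 is the START bit, CDB byte 1 bit 0 is IMMED */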
2656 pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
2657 cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
2658 pmcmd->cid = cpu_to_le32(sdev_id(sdev));
2659 pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
2660 cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
2661
2662 /*
2663 * Now send the Fib to the adapter
2664 */
2665 status = aac_fib_send(ContainerCommand,
2666 cmd_fibcontext,
2667 sizeof(struct aac_power_management),
2668 FsaNormal,
2669 0, 1,
2670 (fib_callback)aac_start_stop_callback,
2671 (void *)scsicmd);
2672
2673 /*
2674 	 * Check that the command was queued to the controller
2675 */
2676 if (status == -EINPROGRESS) {
2677 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
2678 return 0;
2679 }
2680
2681 aac_fib_complete(cmd_fibcontext);
2682 aac_fib_free(cmd_fibcontext);
2683 return SCSI_MLQUEUE_HOST_BUSY;
2684 }
2685
2686 /**
2687 * aac_scsi_cmd() - Process SCSI command
2688 * @scsicmd: SCSI command block
2689 *
2690 * Emulate a SCSI command and queue the required request for the
2691 * aacraid firmware.
2692 */
2693
2694 int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2695 {
2696 u32 cid, bus;
2697 struct Scsi_Host *host = scsicmd->device->host;
2698 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
2699 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
2700
2701 if (fsa_dev_ptr == NULL)
2702 return -1;
2703 /*
2704 * If the bus, id or lun is out of range, return fail
2705 * Test does not apply to ID 16, the pseudo id for the controller
2706 * itself.
2707 */
2708 cid = scmd_id(scsicmd);
2709 if (cid != host->this_id) {
2710 if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
2711 if((cid >= dev->maximum_num_containers) ||
2712 (scsicmd->device->lun != 0)) {
2713 scsicmd->result = DID_NO_CONNECT << 16;
2714 goto scsi_done_ret;
2715 }
2716
2717 /*
2718 * If the target container doesn't exist, it may have
2719 * been newly created
2720 */
2721 if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
2722 (fsa_dev_ptr[cid].sense_data.sense_key ==
2723 NOT_READY)) {
2724 switch (scsicmd->cmnd[0]) {
2725 case SERVICE_ACTION_IN_16:
2726 if (!(dev->raw_io_interface) ||
2727 !(dev->raw_io_64) ||
2728 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
2729 break;
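				/* fall through */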
2730 case INQUIRY:
2731 case READ_CAPACITY:
2732 case TEST_UNIT_READY:
2733 if (dev->in_reset)
2734 return -1;
2735 return _aac_probe_container(scsicmd,
2736 aac_probe_container_callback2);
2737 default:
2738 break;
2739 }
2740 }
2741 } else { /* check for physical non-dasd devices */
2742 bus = aac_logical_to_phys(scmd_channel(scsicmd));
2743 if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
2744 (dev->hba_map[bus][cid].expose
2745 == AAC_HIDE_DISK)){
2746 if (scsicmd->cmnd[0] == INQUIRY) {
2747 scsicmd->result = DID_NO_CONNECT << 16;
2748 goto scsi_done_ret;
2749 }
2750 }
2751
2752 if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
2753 dev->hba_map[bus][cid].devtype
2754 == AAC_DEVTYPE_NATIVE_RAW) {
2755 if (dev->in_reset)
2756 return -1;
2757 return aac_send_hba_fib(scsicmd);
2758 } else if (dev->nondasd_support || expose_physicals ||
2759 dev->jbod) {
2760 if (dev->in_reset)
2761 return -1;
2762 return aac_send_srb_fib(scsicmd);
2763 } else {
2764 scsicmd->result = DID_NO_CONNECT << 16;
2765 goto scsi_done_ret;
2766 }
2767 }
2768 }
2769 /*
2770 * else Command for the controller itself
2771 */
2772 else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */
2773 (scsicmd->cmnd[0] != TEST_UNIT_READY))
2774 {
2775 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
2776 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2777 set_sense(&dev->fsa_dev[cid].sense_data,
2778 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
2779 ASENCODE_INVALID_COMMAND, 0, 0);
2780 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2781 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2782 SCSI_SENSE_BUFFERSIZE));
2783 goto scsi_done_ret;
2784 }
2785
2786 switch (scsicmd->cmnd[0]) {
2787 case READ_6:
2788 case READ_10:
2789 case READ_12:
2790 case READ_16:
2791 if (dev->in_reset)
2792 return -1;
2793 return aac_read(scsicmd);
2794
2795 case WRITE_6:
2796 case WRITE_10:
2797 case WRITE_12:
2798 case WRITE_16:
2799 if (dev->in_reset)
2800 return -1;
2801 return aac_write(scsicmd);
2802
2803 case SYNCHRONIZE_CACHE:
2804 if (((aac_cache & 6) == 6) && dev->cache_protected) {
2805 scsicmd->result = AAC_STAT_GOOD;
2806 break;
2807 }
2808 		/* Issue FIB to tell Firmware to flush its cache */
2809 		if ((aac_cache & 6) != 2)
2810 			return aac_synchronize(scsicmd);
		/*
		 * Cache sync is administratively disabled; complete the
		 * command here rather than falling through into the
		 * INQUIRY handling below.
		 */
		scsicmd->result = AAC_STAT_GOOD;
		break;
2811 case INQUIRY:
2812 {
2813 struct inquiry_data inq_data;
2814
2815 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
2816 memset(&inq_data, 0, sizeof (struct inquiry_data));
2817
2818 if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
2819 char *arr = (char *)&inq_data;
2820
2821 /* EVPD bit set */
2822 arr[0] = (scmd_id(scsicmd) == host->this_id) ?
2823 INQD_PDT_PROC : INQD_PDT_DA;
2824 if (scsicmd->cmnd[2] == 0) {
2825 /* supported vital product data pages */
2826 arr[3] = 3;
2827 arr[4] = 0x0;
2828 arr[5] = 0x80;
2829 arr[6] = 0x83;
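				/* arr[3] is the page-list length; the
				 * supported pages are 0x00, 0x80 (unit
				 * serial number) and 0x83 (device
				 * identification) */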
2830 arr[1] = scsicmd->cmnd[2];
2831 scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2832 sizeof(inq_data));
2833 scsicmd->result = AAC_STAT_GOOD;
2834 } else if (scsicmd->cmnd[2] == 0x80) {
2835 /* unit serial number page */
2836 arr[3] = setinqserial(dev, &arr[4],
2837 scmd_id(scsicmd));
2838 arr[1] = scsicmd->cmnd[2];
2839 scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2840 sizeof(inq_data));
2841 if (aac_wwn != 2)
2842 return aac_get_container_serial(
2843 scsicmd);
2844 scsicmd->result = AAC_STAT_GOOD;
2845 } else if (scsicmd->cmnd[2] == 0x83) {
2846 /* vpd page 0x83 - Device Identification Page */
2847 char *sno = (char *)&inq_data;
2848 sno[3] = setinqserial(dev, &sno[4],
2849 scmd_id(scsicmd));
2850 if (aac_wwn != 2)
2851 return aac_get_container_serial(
2852 scsicmd);
2853 scsicmd->result = AAC_STAT_GOOD;
2854 } else {
2855 /* vpd page not implemented */
2856 scsicmd->result = DID_OK << 16 |
2857 COMMAND_COMPLETE << 8 |
2858 SAM_STAT_CHECK_CONDITION;
2859 set_sense(&dev->fsa_dev[cid].sense_data,
2860 ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
2861 ASENCODE_NO_SENSE, 7, 2);
2862 memcpy(scsicmd->sense_buffer,
2863 &dev->fsa_dev[cid].sense_data,
2864 min_t(size_t,
2865 sizeof(dev->fsa_dev[cid].sense_data),
2866 SCSI_SENSE_BUFFERSIZE));
2867 }
2868 break;
2869 }
2870 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
2871 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
2872 inq_data.inqd_len = 31;
2873 	/* Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked | Reserved | CmdQue | SftRe */
2874 	inq_data.inqd_pad2 = 0x32;	/* WBus16 | Sync | CmdQue */
2875 /*
2876 * Set the Vendor, Product, and Revision Level
2877 * see: <vendor>.c i.e. aac.c
2878 */
2879 if (cid == host->this_id) {
2880 setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
2881 inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
2882 scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2883 sizeof(inq_data));
2884 scsicmd->result = AAC_STAT_GOOD;
2885 break;
2886 }
2887 if (dev->in_reset)
2888 return -1;
2889 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
2890 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
2891 scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
2892 return aac_get_container_name(scsicmd);
2893 }
2894 case SERVICE_ACTION_IN_16:
2895 if (!(dev->raw_io_interface) ||
2896 !(dev->raw_io_64) ||
2897 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
2898 break;
2899 {
2900 u64 capacity;
2901 char cp[13];
2902 unsigned int alloc_len;
2903
2904 dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
2905 capacity = fsa_dev_ptr[cid].size - 1;
2906 cp[0] = (capacity >> 56) & 0xff;
2907 cp[1] = (capacity >> 48) & 0xff;
2908 cp[2] = (capacity >> 40) & 0xff;
2909 cp[3] = (capacity >> 32) & 0xff;
2910 cp[4] = (capacity >> 24) & 0xff;
2911 cp[5] = (capacity >> 16) & 0xff;
2912 cp[6] = (capacity >> 8) & 0xff;
2913 cp[7] = (capacity >> 0) & 0xff;
2914 cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
2915 cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
2916 cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
2917 cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
2918 cp[12] = 0;
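		/* READ CAPACITY(16) data is big-endian: an 8-byte last
		 * LBA followed by the 4-byte logical block length */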
2919
2920 alloc_len = ((scsicmd->cmnd[10] << 24)
2921 + (scsicmd->cmnd[11] << 16)
2922 + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
2923
2924 alloc_len = min_t(size_t, alloc_len, sizeof(cp));
2925 scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
2926 if (alloc_len < scsi_bufflen(scsicmd))
2927 scsi_set_resid(scsicmd,
2928 scsi_bufflen(scsicmd) - alloc_len);
2929
2930 /* Do not cache partition table for arrays */
2931 scsicmd->device->removable = 1;
2932
2933 scsicmd->result = AAC_STAT_GOOD;
2934 break;
2935 }
2936
2937 case READ_CAPACITY:
2938 {
2939 u32 capacity;
2940 char cp[8];
2941
2942 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
2943 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
2944 capacity = fsa_dev_ptr[cid].size - 1;
2945 else
2946 capacity = (u32)-1;
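		/* per SBC, READ CAPACITY(10) returns 0xffffffff when the
		 * capacity will not fit in 32 bits, directing the
		 * initiator to use READ CAPACITY(16) instead */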
2947
2948 cp[0] = (capacity >> 24) & 0xff;
2949 cp[1] = (capacity >> 16) & 0xff;
2950 cp[2] = (capacity >> 8) & 0xff;
2951 cp[3] = (capacity >> 0) & 0xff;
2952 cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
2953 cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
2954 cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
2955 cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
2956 scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
2957 /* Do not cache partition table for arrays */
2958 scsicmd->device->removable = 1;
2959 scsicmd->result = AAC_STAT_GOOD;
2960 break;
2961 }
2962
2963 case MODE_SENSE:
2964 {
2965 int mode_buf_length = 4;
2966 u32 capacity;
2967 aac_modep_data mpd;
2968
2969 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
2970 capacity = fsa_dev_ptr[cid].size - 1;
2971 else
2972 capacity = (u32)-1;
2973
2974 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
2975 memset((char *)&mpd, 0, sizeof(aac_modep_data));
2976
2977 /* Mode data length */
2978 mpd.hd.data_length = sizeof(mpd.hd) - 1;
2979 /* Medium type - default */
2980 mpd.hd.med_type = 0;
2981 /* Device-specific param,
2982 bit 8: 0/1 = write enabled/protected
2983 bit 4: 0/1 = FUA enabled */
2984 mpd.hd.dev_par = 0;
2985
2986 if (dev->raw_io_interface && ((aac_cache & 5) != 1))
2987 mpd.hd.dev_par = 0x10;
2988 if (scsicmd->cmnd[1] & 0x8)
2989 mpd.hd.bd_length = 0; /* Block descriptor length */
2990 else {
2991 mpd.hd.bd_length = sizeof(mpd.bd);
2992 mpd.hd.data_length += mpd.hd.bd_length;
2993 mpd.bd.block_length[0] =
2994 (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
2995 mpd.bd.block_length[1] =
2996 (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
2997 mpd.bd.block_length[2] =
2998 fsa_dev_ptr[cid].block_size & 0xff;
2999
3000 mpd.mpc_buf[0] = scsicmd->cmnd[2];
3001 if (scsicmd->cmnd[2] == 0x1C) {
3002 /* page length */
3003 mpd.mpc_buf[1] = 0xa;
3004 /* Mode data length */
3005 mpd.hd.data_length = 23;
3006 } else {
3007 /* Mode data length */
3008 mpd.hd.data_length = 15;
3009 }
3010
3011 if (capacity > 0xffffff) {
3012 mpd.bd.block_count[0] = 0xff;
3013 mpd.bd.block_count[1] = 0xff;
3014 mpd.bd.block_count[2] = 0xff;
3015 } else {
3016 mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
3017 mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
3018 mpd.bd.block_count[2] = capacity & 0xff;
3019 }
3020 }
3021 if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3022 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3023 mpd.hd.data_length += 3;
3024 mpd.mpc_buf[0] = 8;
3025 mpd.mpc_buf[1] = 1;
3026 mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
3027 ? 0 : 0x04; /* WCE */
3028 mode_buf_length = sizeof(mpd);
3029 }
3030
3031 if (mode_buf_length > scsicmd->cmnd[4])
3032 mode_buf_length = scsicmd->cmnd[4];
3033 else
3034 mode_buf_length = sizeof(mpd);
3035 scsi_sg_copy_from_buffer(scsicmd,
3036 (char *)&mpd,
3037 mode_buf_length);
3038 scsicmd->result = AAC_STAT_GOOD;
3039 break;
3040 }
3041 case MODE_SENSE_10:
3042 {
3043 u32 capacity;
3044 int mode_buf_length = 8;
3045 aac_modep10_data mpd10;
3046
3047 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
3048 capacity = fsa_dev_ptr[cid].size - 1;
3049 else
3050 capacity = (u32)-1;
3051
3052 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
3053 memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
3054 /* Mode data length (MSB) */
3055 mpd10.hd.data_length[0] = 0;
3056 /* Mode data length (LSB) */
3057 mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
3058 /* Medium type - default */
3059 mpd10.hd.med_type = 0;
3060 /* Device-specific param,
3061 bit 8: 0/1 = write enabled/protected
3062 bit 4: 0/1 = FUA enabled */
3063 mpd10.hd.dev_par = 0;
3064
3065 if (dev->raw_io_interface && ((aac_cache & 5) != 1))
3066 mpd10.hd.dev_par = 0x10;
3067 mpd10.hd.rsrvd[0] = 0; /* reserved */
3068 mpd10.hd.rsrvd[1] = 0; /* reserved */
3069 if (scsicmd->cmnd[1] & 0x8) {
3070 /* Block descriptor length (MSB) */
3071 mpd10.hd.bd_length[0] = 0;
3072 /* Block descriptor length (LSB) */
3073 mpd10.hd.bd_length[1] = 0;
3074 } else {
3075 mpd10.hd.bd_length[0] = 0;
3076 mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
3077
3078 mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
3079
3080 mpd10.bd.block_length[0] =
3081 (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
3082 mpd10.bd.block_length[1] =
3083 (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
3084 mpd10.bd.block_length[2] =
3085 fsa_dev_ptr[cid].block_size & 0xff;
3086
3087 if (capacity > 0xffffff) {
3088 mpd10.bd.block_count[0] = 0xff;
3089 mpd10.bd.block_count[1] = 0xff;
3090 mpd10.bd.block_count[2] = 0xff;
3091 } else {
3092 mpd10.bd.block_count[0] =
3093 (capacity >> 16) & 0xff;
3094 mpd10.bd.block_count[1] =
3095 (capacity >> 8) & 0xff;
3096 mpd10.bd.block_count[2] =
3097 capacity & 0xff;
3098 }
3099 }
3100 if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
3101 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
3102 mpd10.hd.data_length[1] += 3;
3103 mpd10.mpc_buf[0] = 8;
3104 mpd10.mpc_buf[1] = 1;
3105 mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
3106 ? 0 : 0x04; /* WCE */
3107 mode_buf_length = sizeof(mpd10);
3108 if (mode_buf_length > scsicmd->cmnd[8])
3109 mode_buf_length = scsicmd->cmnd[8];
3110 }
3111 scsi_sg_copy_from_buffer(scsicmd,
3112 (char *)&mpd10,
3113 mode_buf_length);
3114
3115 scsicmd->result = AAC_STAT_GOOD;
3116 break;
3117 }
3118 case REQUEST_SENSE:
3119 dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
3120 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3121 sizeof(struct sense_data));
3122 memset(&dev->fsa_dev[cid].sense_data, 0,
3123 sizeof(struct sense_data));
3124 scsicmd->result = AAC_STAT_GOOD;
3125 break;
3126
3127 case ALLOW_MEDIUM_REMOVAL:
3128 dprintk((KERN_DEBUG "LOCK command.\n"));
3129 if (scsicmd->cmnd[4])
3130 fsa_dev_ptr[cid].locked = 1;
3131 else
3132 fsa_dev_ptr[cid].locked = 0;
3133
3134 scsicmd->result = AAC_STAT_GOOD;
3135 break;
3136 /*
3137 * These commands are all No-Ops
3138 */
3139 case TEST_UNIT_READY:
3140 if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
3141 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3142 SAM_STAT_CHECK_CONDITION;
3143 set_sense(&dev->fsa_dev[cid].sense_data,
3144 NOT_READY, SENCODE_BECOMING_READY,
3145 ASENCODE_BECOMING_READY, 0, 0);
3146 memcpy(scsicmd->sense_buffer,
3147 &dev->fsa_dev[cid].sense_data,
3148 min_t(size_t,
3149 sizeof(dev->fsa_dev[cid].sense_data),
3150 SCSI_SENSE_BUFFERSIZE));
3151 break;
3152 }
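		/* fall through */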
3153 case RESERVE:
3154 case RELEASE:
3155 case REZERO_UNIT:
3156 case REASSIGN_BLOCKS:
3157 case SEEK_10:
3158 scsicmd->result = AAC_STAT_GOOD;
3159 break;
3160
3161 case START_STOP:
3162 return aac_start_stop(scsicmd);
3163
3165 default:
3166 /*
3167 * Unhandled commands
3168 */
3169 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
3170 scsicmd->cmnd[0]));
3171 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
3172 SAM_STAT_CHECK_CONDITION;
3173 set_sense(&dev->fsa_dev[cid].sense_data,
3174 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
3175 ASENCODE_INVALID_COMMAND, 0, 0);
3176 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
3177 min_t(size_t,
3178 sizeof(dev->fsa_dev[cid].sense_data),
3179 SCSI_SENSE_BUFFERSIZE));
3180 }
3181
3182 scsi_done_ret:
3183
3184 scsicmd->scsi_done(scsicmd);
3185 return 0;
3186 }
3187
3188 static int query_disk(struct aac_dev *dev, void __user *arg)
3189 {
3190 struct aac_query_disk qd;
3191 struct fsa_dev_info *fsa_dev_ptr;
3192
3193 fsa_dev_ptr = dev->fsa_dev;
3194 if (!fsa_dev_ptr)
3195 return -EBUSY;
3196 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
3197 return -EFAULT;
3198 if (qd.cnum == -1)
3199 qd.cnum = qd.id;
3200 else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
3201 {
3202 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
3203 return -EINVAL;
3204 qd.instance = dev->scsi_host_ptr->host_no;
3205 qd.bus = 0;
3206 qd.id = CONTAINER_TO_ID(qd.cnum);
3207 qd.lun = CONTAINER_TO_LUN(qd.cnum);
3208 }
3209 else return -EINVAL;
3210
3211 qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
3212 qd.locked = fsa_dev_ptr[qd.cnum].locked;
3213 qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
3214
3215 if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
3216 qd.unmapped = 1;
3217 else
3218 qd.unmapped = 0;
3219
3220 strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
3221 min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
3222
3223 if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
3224 return -EFAULT;
3225 return 0;
3226 }
3227
3228 static int force_delete_disk(struct aac_dev *dev, void __user *arg)
3229 {
3230 struct aac_delete_disk dd;
3231 struct fsa_dev_info *fsa_dev_ptr;
3232
3233 fsa_dev_ptr = dev->fsa_dev;
3234 if (!fsa_dev_ptr)
3235 return -EBUSY;
3236
3237 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3238 return -EFAULT;
3239
3240 if (dd.cnum >= dev->maximum_num_containers)
3241 return -EINVAL;
3242 /*
3243 * Mark this container as being deleted.
3244 */
3245 fsa_dev_ptr[dd.cnum].deleted = 1;
3246 /*
3247 * Mark the container as no longer valid
3248 */
3249 fsa_dev_ptr[dd.cnum].valid = 0;
3250 return 0;
3251 }
3252
3253 static int delete_disk(struct aac_dev *dev, void __user *arg)
3254 {
3255 struct aac_delete_disk dd;
3256 struct fsa_dev_info *fsa_dev_ptr;
3257
3258 fsa_dev_ptr = dev->fsa_dev;
3259 if (!fsa_dev_ptr)
3260 return -EBUSY;
3261
3262 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
3263 return -EFAULT;
3264
3265 if (dd.cnum >= dev->maximum_num_containers)
3266 return -EINVAL;
3267 /*
3268 * If the container is locked, it can not be deleted by the API.
3269 */
3270 if (fsa_dev_ptr[dd.cnum].locked)
3271 return -EBUSY;
3272 else {
3273 /*
3274 * Mark the container as no longer being valid.
3275 */
3276 fsa_dev_ptr[dd.cnum].valid = 0;
3277 fsa_dev_ptr[dd.cnum].devname[0] = '\0';
3278 return 0;
3279 }
3280 }
3281
3282 int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
3283 {
3284 switch (cmd) {
3285 case FSACTL_QUERY_DISK:
3286 return query_disk(dev, arg);
3287 case FSACTL_DELETE_DISK:
3288 return delete_disk(dev, arg);
3289 case FSACTL_FORCE_DELETE_DISK:
3290 return force_delete_disk(dev, arg);
3291 case FSACTL_GET_CONTAINERS:
3292 return aac_get_containers(dev);
3293 default:
3294 return -ENOTTY;
3295 }
3296 }
3297
3298 /**
3299 * aac_srb_callback - Handles the completion of a scsi command to a
3300 *	non-dasd device
3301 * @context:	the context set in the fib - here it is scsi cmd
3302 * @fibptr:	pointer to the fib
3306 */
3307
3308 static void aac_srb_callback(void *context, struct fib * fibptr)
3309 {
3310 struct aac_dev *dev;
3311 struct aac_srb_reply *srbreply;
3312 struct scsi_cmnd *scsicmd;
3313
3314 scsicmd = (struct scsi_cmnd *) context;
3315
3316 if (!aac_valid_context(scsicmd, fibptr))
3317 return;
3318
3319 BUG_ON(fibptr == NULL);
3320
3321 dev = fibptr->dev;
3322
3323 srbreply = (struct aac_srb_reply *) fib_data(fibptr);
3324
3325 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
3326
3327 if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3328 /* fast response */
3329 srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
3330 srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
3331 } else {
3332 /*
3333 * Calculate resid for sg
3334 */
3335 scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
3336 - le32_to_cpu(srbreply->data_xfer_length));
3337 }
3338
3339
3340 scsi_dma_unmap(scsicmd);
3341
3342 	/* expose physical device if expose_physicals flag is on */
3343 if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
3344 && expose_physicals > 0)
3345 aac_expose_phy_device(scsicmd);
3346
3347 /*
3348 * First check the fib status
3349 */
3350
3351 if (le32_to_cpu(srbreply->status) != ST_OK) {
3352 int len;
3353
3354 pr_warn("aac_srb_callback: srb failed, status = %d\n",
3355 le32_to_cpu(srbreply->status));
3356 len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3357 SCSI_SENSE_BUFFERSIZE);
3358 scsicmd->result = DID_ERROR << 16
3359 | COMMAND_COMPLETE << 8
3360 | SAM_STAT_CHECK_CONDITION;
3361 memcpy(scsicmd->sense_buffer,
3362 srbreply->sense_data, len);
3363 }
3364
3365 /*
3366 * Next check the srb status
3367 */
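	/* the low six bits hold the SRB status code proper; the upper
	 * bits appear to carry flags, hence the 0x3f mask */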
3368 switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
3369 case SRB_STATUS_ERROR_RECOVERY:
3370 case SRB_STATUS_PENDING:
3371 case SRB_STATUS_SUCCESS:
3372 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3373 break;
3374 case SRB_STATUS_DATA_OVERRUN:
3375 switch (scsicmd->cmnd[0]) {
3376 case READ_6:
3377 case WRITE_6:
3378 case READ_10:
3379 case WRITE_10:
3380 case READ_12:
3381 case WRITE_12:
3382 case READ_16:
3383 case WRITE_16:
3384 if (le32_to_cpu(srbreply->data_xfer_length)
3385 < scsicmd->underflow)
3386 pr_warn("aacraid: SCSI CMD underflow\n");
3387 else
3388 pr_warn("aacraid: SCSI CMD Data Overrun\n");
3389 scsicmd->result = DID_ERROR << 16
3390 | COMMAND_COMPLETE << 8;
3391 break;
3392 case INQUIRY:
3393 scsicmd->result = DID_OK << 16
3394 | COMMAND_COMPLETE << 8;
3395 break;
3396 default:
3397 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3398 break;
3399 }
3400 break;
3401 case SRB_STATUS_ABORTED:
3402 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3403 break;
3404 case SRB_STATUS_ABORT_FAILED:
3405 /*
3406 * Not sure about this one - but assuming the
3407 * hba was trying to abort for some reason
3408 */
3409 scsicmd->result = DID_ERROR << 16 | ABORT << 8;
3410 break;
3411 case SRB_STATUS_PARITY_ERROR:
3412 scsicmd->result = DID_PARITY << 16
3413 | MSG_PARITY_ERROR << 8;
3414 break;
3415 case SRB_STATUS_NO_DEVICE:
3416 case SRB_STATUS_INVALID_PATH_ID:
3417 case SRB_STATUS_INVALID_TARGET_ID:
3418 case SRB_STATUS_INVALID_LUN:
3419 case SRB_STATUS_SELECTION_TIMEOUT:
3420 scsicmd->result = DID_NO_CONNECT << 16
3421 | COMMAND_COMPLETE << 8;
3422 break;
3423
3424 case SRB_STATUS_COMMAND_TIMEOUT:
3425 case SRB_STATUS_TIMEOUT:
3426 scsicmd->result = DID_TIME_OUT << 16
3427 | COMMAND_COMPLETE << 8;
3428 break;
3429
3430 case SRB_STATUS_BUSY:
3431 scsicmd->result = DID_BUS_BUSY << 16
3432 | COMMAND_COMPLETE << 8;
3433 break;
3434
3435 case SRB_STATUS_BUS_RESET:
3436 scsicmd->result = DID_RESET << 16
3437 | COMMAND_COMPLETE << 8;
3438 break;
3439
3440 case SRB_STATUS_MESSAGE_REJECTED:
3441 scsicmd->result = DID_ERROR << 16
3442 | MESSAGE_REJECT << 8;
3443 break;
3444 case SRB_STATUS_REQUEST_FLUSHED:
3445 case SRB_STATUS_ERROR:
3446 case SRB_STATUS_INVALID_REQUEST:
3447 case SRB_STATUS_REQUEST_SENSE_FAILED:
3448 case SRB_STATUS_NO_HBA:
3449 case SRB_STATUS_UNEXPECTED_BUS_FREE:
3450 case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
3451 case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
3452 case SRB_STATUS_DELAYED_RETRY:
3453 case SRB_STATUS_BAD_FUNCTION:
3454 case SRB_STATUS_NOT_STARTED:
3455 case SRB_STATUS_NOT_IN_USE:
3456 case SRB_STATUS_FORCE_ABORT:
3457 case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
3458 default:
3459 #ifdef AAC_DETAILED_STATUS_INFO
3460 pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
3461 le32_to_cpu(srbreply->srb_status) & 0x3F,
3462 aac_get_status_string(
3463 le32_to_cpu(srbreply->srb_status) & 0x3F),
3464 scsicmd->cmnd[0],
3465 le32_to_cpu(srbreply->scsi_status));
3466 #endif
3467 /*
3468 * When the CC bit is SET by the host in ATA pass thru CDB,
3469 * driver is supposed to return DID_OK
3470 *
3471 * When the CC bit is RESET by the host, driver should
3472 * return DID_ERROR
3473 */
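		/* the "CC bit" is presumably SAT's CK_COND flag, bit 5 of
		 * CDB byte 2 in ATA PASS-THROUGH(12)/(16) */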
3474 if ((scsicmd->cmnd[0] == ATA_12)
3475 || (scsicmd->cmnd[0] == ATA_16)) {
3476
3477 if (scsicmd->cmnd[2] & (0x01 << 5)) {
3478 scsicmd->result = DID_OK << 16
3479 | COMMAND_COMPLETE << 8;
3480 break;
3481 } else {
3482 scsicmd->result = DID_ERROR << 16
3483 | COMMAND_COMPLETE << 8;
3484 break;
3485 }
3486 } else {
3487 scsicmd->result = DID_ERROR << 16
3488 | COMMAND_COMPLETE << 8;
3489 break;
3490 }
3491 }
3492 if (le32_to_cpu(srbreply->scsi_status)
3493 == SAM_STAT_CHECK_CONDITION) {
3494 int len;
3495
3496 scsicmd->result |= SAM_STAT_CHECK_CONDITION;
3497 len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3498 SCSI_SENSE_BUFFERSIZE);
3499 #ifdef AAC_DETAILED_STATUS_INFO
3500 pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
3501 le32_to_cpu(srbreply->status), len);
3502 #endif
3503 memcpy(scsicmd->sense_buffer,
3504 srbreply->sense_data, len);
3505 }
3506
3507 /*
3508 * OR in the scsi status (already shifted up a bit)
3509 */
3510 scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
3511
3512 aac_fib_complete(fibptr);
3513 scsicmd->scsi_done(scsicmd);
3514 }
3515
3516 static void hba_resp_task_complete(struct aac_dev *dev,
3517 struct scsi_cmnd *scsicmd,
3518 	struct aac_hba_resp *err)
3519 {
3520 scsicmd->result = err->status;
3521 /* set residual count */
3522 scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count));
3523
3524 switch (err->status) {
3525 case SAM_STAT_GOOD:
3526 scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3527 break;
3528 case SAM_STAT_CHECK_CONDITION:
3529 {
3530 int len;
3531
3532 len = min_t(u8, err->sense_response_data_len,
3533 SCSI_SENSE_BUFFERSIZE);
3534 if (len)
3535 memcpy(scsicmd->sense_buffer,
3536 err->sense_response_buf, len);
3537 scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3538 break;
3539 }
3540 case SAM_STAT_BUSY:
3541 scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
3542 break;
3543 case SAM_STAT_TASK_ABORTED:
3544 scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
3545 break;
3546 case SAM_STAT_RESERVATION_CONFLICT:
3547 case SAM_STAT_TASK_SET_FULL:
3548 default:
3549 scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3550 break;
3551 }
3552 }
3553
3554 static void hba_resp_task_failure(struct aac_dev *dev,
3555 struct scsi_cmnd *scsicmd,
3556 struct aac_hba_resp *err)
3557 {
3558 switch (err->status) {
3559 case HBA_RESP_STAT_HBAMODE_DISABLED:
3560 {
3561 u32 bus, cid;
3562
3563 bus = aac_logical_to_phys(scmd_channel(scsicmd));
3564 cid = scmd_id(scsicmd);
3565 if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
3566 dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
3567 dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
3568 }
3569 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3570 break;
3571 }
3572 case HBA_RESP_STAT_IO_ERROR:
3573 case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
3574 scsicmd->result = DID_OK << 16 |
3575 COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
3576 break;
3577 case HBA_RESP_STAT_IO_ABORTED:
3578 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3579 break;
3580 case HBA_RESP_STAT_INVALID_DEVICE:
3581 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3582 break;
3583 case HBA_RESP_STAT_UNDERRUN:
3584 /* UNDERRUN is OK */
3585 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3586 break;
3587 case HBA_RESP_STAT_OVERRUN:
3588 default:
3589 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3590 break;
3591 }
3592 }
3593
3594 /**
3595 * aac_hba_callback - Handles the completion of a native HBA scsi command
3596 * @context:	the context set in the fib - here it is scsi cmd
3597 * @fibptr:	pointer to the fib
3602 */
3603 void aac_hba_callback(void *context, struct fib *fibptr)
3604 {
3605 struct aac_dev *dev;
3606 struct scsi_cmnd *scsicmd;
3607
3608 struct aac_hba_resp *err =
3609 &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
3610
3611 scsicmd = (struct scsi_cmnd *) context;
3612
3613 if (!aac_valid_context(scsicmd, fibptr))
3614 return;
3615
3616 WARN_ON(fibptr == NULL);
3617 dev = fibptr->dev;
3618
3619 if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF))
3620 scsi_dma_unmap(scsicmd);
3621
3622 if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3623 /* fast response */
3624 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3625 goto out;
3626 }
3627
3628 switch (err->service_response) {
3629 case HBA_RESP_SVCRES_TASK_COMPLETE:
3630 hba_resp_task_complete(dev, scsicmd, err);
3631 break;
3632 case HBA_RESP_SVCRES_FAILURE:
3633 hba_resp_task_failure(dev, scsicmd, err);
3634 break;
3635 case HBA_RESP_SVCRES_TMF_REJECTED:
3636 scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
3637 break;
3638 case HBA_RESP_SVCRES_TMF_LUN_INVALID:
3639 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3640 break;
3641 case HBA_RESP_SVCRES_TMF_COMPLETE:
3642 case HBA_RESP_SVCRES_TMF_SUCCEEDED:
3643 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3644 break;
3645 default:
3646 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3647 break;
3648 }
3649
3650 out:
3651 aac_fib_complete(fibptr);
3652
3653 if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
3654 scsicmd->SCp.sent_command = 1;
3655 else
3656 scsicmd->scsi_done(scsicmd);
3657 }
3658
3659 /**
3660 * aac_send_srb_fib - form an SRB FIB for a scsi command
3661 * @scsicmd: the scsi command block
3662 *
3663 * This routine will form a FIB and fill in the aac_srb from the
3664 * scsicmd passed in.
3665 */
3666 static int aac_send_srb_fib(struct scsi_cmnd *scsicmd)
3669 {
3670 struct fib *cmd_fibcontext;
3671 struct aac_dev *dev;
3672 int status;
3673
3674 dev = shost_priv(scsicmd->device->host);
3675 if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3676 scsicmd->device->lun > 7) {
3677 scsicmd->result = DID_NO_CONNECT << 16;
3678 scsicmd->scsi_done(scsicmd);
3679 return 0;
3680 }
3681
3682 /*
3683 * Allocate and initialize a Fib, then set up an SRB command
3684 */
3685 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3686 if (!cmd_fibcontext)
3687 return -1;
3686
3687 status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
3688
3689 /*
3690 * Check that the command was queued to the controller
3691 */
3692 if (status == -EINPROGRESS) {
3693 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3694 return 0;
3695 }
3696
3697 pr_warn("aac_srb: aac_fib_send failed with status: %d\n", status);
3698 aac_fib_complete(cmd_fibcontext);
3699 aac_fib_free(cmd_fibcontext);
3700
3701 return -1;
3702 }
3703
3704 /**
3705 * aac_send_hba_fib - form a native HBA FIB for a scsi command
3706 * @scsicmd: the scsi command block
3707 *
3708 * This routine will form a FIB and fill in the aac_hba_cmd_req from the
3709 * scsicmd passed in.
3710 */
3712 static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
3713 {
3714 struct fib *cmd_fibcontext;
3715 struct aac_dev *dev;
3716 int status;
3717
3718 dev = shost_priv(scsicmd->device->host);
3719 if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3720 scsicmd->device->lun > AAC_MAX_LUN - 1) {
3721 scsicmd->result = DID_NO_CONNECT << 16;
3722 scsicmd->scsi_done(scsicmd);
3723 return 0;
3724 }
3725
3726 /*
3727 * Allocate and initialize a Fib, then set up a native HBA command
3728 */
3729 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3730 if (!cmd_fibcontext)
3731 return -1;
3732
3733 status = aac_adapter_hba(cmd_fibcontext, scsicmd);
3734
3735 /*
3736 * Check that the command was queued to the controller
3737 */
3738 if (status == -EINPROGRESS) {
3739 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3740 return 0;
3741 }
3742
3743 pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
3744 status);
3745 aac_fib_complete(cmd_fibcontext);
3746 aac_fib_free(cmd_fibcontext);
3747
3748 return -1;
3749 }
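/*
 * Editor's note, illustrative only: both senders above share one
 * convention - the submit hook (aac_adapter_scsi()/aac_adapter_hba())
 * returns -EINPROGRESS once the FIB is queued, at which point the
 * command is owned by the firmware (SCp.phase = AAC_OWNER_FIRMWARE) and
 * is finished later by the callback; any other status is an immediate
 * failure and the FIB is torn down on the spot. A minimal standalone
 * sketch of that pattern, with hypothetical names:
 */
#if 0
#include <stdio.h>
#include <errno.h>

enum owner { OWNER_MIDLEVEL, OWNER_FIRMWARE };

struct cmd_ctx { enum owner owner; };		/* hypothetical */

static int queue_to_firmware(struct cmd_ctx *c)	/* hypothetical hook */
{
	(void)c;
	return -EINPROGRESS;			/* pretend the FIB queued */
}

static int send_fib(struct cmd_ctx *c)
{
	int status = queue_to_firmware(c);

	if (status == -EINPROGRESS) {
		c->owner = OWNER_FIRMWARE;	/* callback completes it */
		return 0;
	}
	/* failure: complete and free the FIB immediately */
	return -1;
}

int main(void)
{
	struct cmd_ctx c = { OWNER_MIDLEVEL };

	printf("send_fib -> %d, owner=%d\n", send_fib(&c), (int)c.owner);
	return 0;
}
#endif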
3750
3752 static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
3753 {
3754 unsigned long byte_count = 0;
3755 int nseg;
3756
3757 /* Get rid of old data */
3760 psg->count = 0;
3761 psg->sg[0].addr = 0;
3762 psg->sg[0].count = 0;
3763
3764 nseg = scsi_dma_map(scsicmd);
3765 if (nseg < 0)
3766 return nseg;
3767 if (nseg) {
3768 struct scatterlist *sg;
3769 int i;
3770
3771 psg->count = cpu_to_le32(nseg);
3772
3773 scsi_for_each_sg(scsicmd, sg, nseg, i) {
3774 psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
3775 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
3776 byte_count += sg_dma_len(sg);
3777 }
3778 /* hba wants the size to be exact */
3779 if (byte_count > scsi_bufflen(scsicmd)) {
3780 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3781 (byte_count - scsi_bufflen(scsicmd));
3782 psg->sg[i-1].count = cpu_to_le32(temp);
3783 byte_count = scsi_bufflen(scsicmd);
3784 }
3785 /* Check for command underflow */
3786 if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3787 printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
3788 byte_count, scsicmd->underflow);
3789 }
3790 }
3791 return byte_count;
3792 }
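/*
 * Editor's illustrative sketch, not part of the driver: the
 * "hba wants the size to be exact" fixup above trims the last
 * scatter-gather element when the mapped total exceeds the request
 * size (DMA mapping may round segments up). Standalone arithmetic
 * with made-up numbers:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int sg_len[] = { 4096, 4096, 4096 };	/* mapped lengths */
	unsigned long byte_count = 12288;		/* their sum */
	unsigned long bufflen = 10000;			/* scsi_bufflen() */
	int last = 2;

	if (byte_count > bufflen) {
		sg_len[last] -= byte_count - bufflen;	/* 4096 - 2288 */
		byte_count = bufflen;
	}
	printf("last element %u bytes, total %lu\n", sg_len[last], byte_count);
	return 0;
}
#endif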
3793
3795 static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
3796 {
3798 unsigned long byte_count = 0;
3799 u64 addr;
3800 int nseg;
3801
3803 /* Get rid of old data */
3804 psg->count = 0;
3805 psg->sg[0].addr[0] = 0;
3806 psg->sg[0].addr[1] = 0;
3807 psg->sg[0].count = 0;
3808
3809 nseg = scsi_dma_map(scsicmd);
3810 if (nseg < 0)
3811 return nseg;
3812 if (nseg) {
3813 struct scatterlist *sg;
3814 int i;
3815
3816 scsi_for_each_sg(scsicmd, sg, nseg, i) {
3817 int count = sg_dma_len(sg);
3818 addr = sg_dma_address(sg);
3819 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
3820 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
3821 psg->sg[i].count = cpu_to_le32(count);
3822 byte_count += count;
3823 }
3824 psg->count = cpu_to_le32(nseg);
3825 /* hba wants the size to be exact */
3826 if (byte_count > scsi_bufflen(scsicmd)) {
3827 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3828 (byte_count - scsi_bufflen(scsicmd));
3829 psg->sg[i-1].count = cpu_to_le32(temp);
3830 byte_count = scsi_bufflen(scsicmd);
3831 }
3832 /* Check for command underflow */
3833 if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3834 printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
3835 byte_count, scsicmd->underflow);
3836 }
3837 }
3838 return byte_count;
3839 }
3840
3841 static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
3842 {
3843 unsigned long byte_count = 0;
3844 int nseg;
3845
3846 /* Get rid of old data */
3847 psg->count = 0;
3848 psg->sg[0].next = 0;
3849 psg->sg[0].prev = 0;
3850 psg->sg[0].addr[0] = 0;
3851 psg->sg[0].addr[1] = 0;
3852 psg->sg[0].count = 0;
3853 psg->sg[0].flags = 0;
3854
3855 nseg = scsi_dma_map(scsicmd);
3856 if (nseg < 0)
3857 return nseg;
3858 if (nseg) {
3859 struct scatterlist *sg;
3860 int i;
3861
3862 scsi_for_each_sg(scsicmd, sg, nseg, i) {
3863 int count = sg_dma_len(sg);
3864 u64 addr = sg_dma_address(sg);
3865 psg->sg[i].next = 0;
3866 psg->sg[i].prev = 0;
3867 psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
3868 psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
3869 psg->sg[i].count = cpu_to_le32(count);
3870 psg->sg[i].flags = 0;
3871 byte_count += count;
3872 }
3873 psg->count = cpu_to_le32(nseg);
3874 /* hba wants the size to be exact */
3875 if (byte_count > scsi_bufflen(scsicmd)) {
3876 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3877 (byte_count - scsi_bufflen(scsicmd));
3878 psg->sg[i-1].count = cpu_to_le32(temp);
3879 byte_count = scsi_bufflen(scsicmd);
3880 }
3881 /* Check for command underflow */
3882 if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3883 printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
3884 byte_count, scsicmd->underflow);
3885 }
3886 }
3887 return byte_count;
3888 }
3889
3890 static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
3891 struct aac_raw_io2 *rio2, int sg_max)
3892 {
3893 unsigned long byte_count = 0;
3894 int nseg;
3895
3896 nseg = scsi_dma_map(scsicmd);
3897 if (nseg < 0)
3898 return nseg;
3899 if (nseg) {
3900 struct scatterlist *sg;
3901 int i, conformable = 0;
3902 u32 min_size = PAGE_SIZE, cur_size;
3903
3904 scsi_for_each_sg(scsicmd, sg, nseg, i) {
3905 int count = sg_dma_len(sg);
3906 u64 addr = sg_dma_address(sg);
3907
3908 BUG_ON(i >= sg_max);
3909 rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
3910 rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
3911 cur_size = cpu_to_le32(count);
3912 rio2->sge[i].length = cur_size;
3913 rio2->sge[i].flags = 0;
3914 if (i == 0) {
3915 conformable = 1;
3916 rio2->sgeFirstSize = cur_size;
3917 } else if (i == 1) {
3918 rio2->sgeNominalSize = cur_size;
3919 min_size = cur_size;
3920 } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
3921 conformable = 0;
3922 if (cur_size < min_size)
3923 min_size = cur_size;
3924 }
3925 byte_count += count;
3926 }
3927
3928 /* hba wants the size to be exact */
3929 if (byte_count > scsi_bufflen(scsicmd)) {
3930 u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
3931 (byte_count - scsi_bufflen(scsicmd));
3932 rio2->sge[i-1].length = cpu_to_le32(temp);
3933 byte_count = scsi_bufflen(scsicmd);
3934 }
3935
3936 rio2->sgeCnt = cpu_to_le32(nseg);
3937 rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
3938 /* not conformable: evaluate required sg elements */
3939 if (!conformable) {
3940 int j, nseg_new = nseg, err_found;
3941 for (i = min_size / PAGE_SIZE; i >= 1; --i) {
3942 err_found = 0;
3943 nseg_new = 2;
3944 for (j = 1; j < nseg - 1; ++j) {
3945 if (rio2->sge[j].length % (i*PAGE_SIZE)) {
3946 err_found = 1;
3947 break;
3948 }
3949 nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
3950 }
3951 if (!err_found)
3952 break;
3953 }
3954 if (i > 0 && nseg_new <= sg_max)
3955 aac_convert_sgraw2(rio2, i, nseg, nseg_new);
3956 } else
3957 rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
3958
3959 /* Check for command underflow */
3960 if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
3961 printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
3962 byte_count, scsicmd->underflow);
3963 }
3964 }
3965
3966 return byte_count;
3967 }
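/*
 * Editor's illustrative sketch, not part of the driver: a RAW IO2 list
 * is "conformant" when every element except the first and last has the
 * same nominal size. When it is not, the loop above searches downward
 * from min_size/PAGE_SIZE for the largest page multiple that divides
 * every middle element, so the list can be re-cut into equal chunks.
 * Standalone version of that search (4 KiB pages and made-up lengths
 * assumed):
 */
#if 0
#include <stdio.h>

#define PAGE_SZ 4096u

int main(void)
{
	unsigned int len[] = { 1024, 16384, 8192, 512 };   /* middle: 16K, 8K */
	int nseg = 4, i, j, nseg_new = 0;
	unsigned int min_size = 8192;                      /* smallest middle */

	for (i = min_size / PAGE_SZ; i >= 1; --i) {
		int ok = 1;

		nseg_new = 2;                              /* first + last */
		for (j = 1; j < nseg - 1; ++j) {
			if (len[j] % (i * PAGE_SZ)) {
				ok = 0;
				break;
			}
			nseg_new += len[j] / (i * PAGE_SZ);
		}
		if (ok)
			break;
	}
	/* i = 2: 8 KiB chunks; 16K -> 2 + 8K -> 1, so nseg_new = 5 */
	printf("chunk pages=%d, nseg_new=%d\n", i, nseg_new);
	return 0;
}
#endif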
3968
3969 static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
3970 {
3971 struct sge_ieee1212 *sge;
3972 int i, j, pos;
3973 u32 addr_low;
3974
3975 if (aac_convert_sgl == 0)
3976 return 0;
3977
3978 sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
3979 if (sge == NULL)
3980 return -1;
3981
3982 for (i = 1, pos = 1; i < nseg-1; ++i) {
3983 for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
3984 addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
3985 sge[pos].addrLow = addr_low;
3986 sge[pos].addrHigh = rio2->sge[i].addrHigh;
3987 if (addr_low < rio2->sge[i].addrLow)
3988 sge[pos].addrHigh++;
3989 sge[pos].length = pages * PAGE_SIZE;
3990 sge[pos].flags = 0;
3991 pos++;
3992 }
3993 }
3994 sge[pos] = rio2->sge[nseg-1];
3995 memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
3996
3997 kfree(sge);
3998 rio2->sgeCnt = cpu_to_le32(nseg_new);
3999 rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
4000 rio2->sgeNominalSize = pages * PAGE_SIZE;
4001 return 0;
4002 }
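/*
 * Editor's illustrative sketch, not part of the driver: the conversion
 * above re-emits each middle element as equal page-multiple chunks,
 * advancing the low 32 address bits and carrying into the high word
 * when the low word wraps past the element's starting value (one carry
 * at most, since an element's length fits in 32 bits). Standalone
 * version of that address walk, with hypothetical values:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr_lo = 0xfffff000u, addr_hi = 0x1;	/* element start */
	uint32_t chunk = 8192, len = 16384, off;

	for (off = 0; off < len; off += chunk) {
		uint32_t lo = addr_lo + off, hi = addr_hi;

		if (lo < addr_lo)	/* 32-bit wrap: carry into high word */
			hi++;
		printf("chunk at %08x:%08x len %u\n",
		       (unsigned)hi, (unsigned)lo, (unsigned)chunk);
	}
	return 0;
}
#endif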
4003
4004 static long aac_build_sghba(struct scsi_cmnd *scsicmd,
4005 struct aac_hba_cmd_req *hbacmd,
4006 int sg_max,
4007 u64 sg_address)
4008 {
4009 unsigned long byte_count = 0;
4010 int nseg;
4011 struct scatterlist *sg;
4012 int i;
4013 u32 cur_size;
4014 struct aac_hba_sgl *sge;
4015
4016 nseg = scsi_dma_map(scsicmd);
4017 if (nseg <= 0) {
4018 byte_count = nseg;
4019 goto out;
4020 }
4021
4022 if (nseg > HBA_MAX_SG_EMBEDDED)
4023 sge = &hbacmd->sge[2];
4024 else
4025 sge = &hbacmd->sge[0];
4026
4027 scsi_for_each_sg(scsicmd, sg, nseg, i) {
4028 int count = sg_dma_len(sg);
4029 u64 addr = sg_dma_address(sg);
4030
4031 WARN_ON(i >= sg_max);
4032 sge->addr_hi = cpu_to_le32((u32)(addr>>32));
4033 sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
4034 cur_size = cpu_to_le32(count);
4035 sge->len = cur_size;
4036 sge->flags = 0;
4037 byte_count += count;
4038 sge++;
4039 }
4040
4041 sge--;
4042 /* hba wants the size to be exact */
4043 if (byte_count > scsi_bufflen(scsicmd)) {
4044 u32 temp;
4045
4046 temp = le32_to_cpu(sge->len) -
4047 (byte_count - scsi_bufflen(scsicmd));
4048 sge->len = cpu_to_le32(temp);
4049 byte_count = scsi_bufflen(scsicmd);
4050 }
4051
4052 if (nseg <= HBA_MAX_SG_EMBEDDED) {
4053 hbacmd->emb_data_desc_count = nseg; /* u8 field, no byteswap */
4054 sge->flags = cpu_to_le32(0x40000000);
4055 } else {
4056 /* not embedded */
4057 hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
4058 hbacmd->emb_data_desc_count = 1; /* u8 field, no byteswap */
4059 hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
4060 hbacmd->sge[0].addr_lo =
4061 cpu_to_le32((u32)(sg_address & 0xffffffff));
4062 }
4063
4064 /* Check for command underflow */
4065 if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4066 pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
4067 byte_count, scsicmd->underflow);
4068 }
4069 out:
4070 return byte_count;
4071 }
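/*
 * Editor's note, illustrative only: the builder above embeds the SGL in
 * the command when it fits, marking the final element with flag
 * 0x40000000, and otherwise writes a one-element embedded descriptor
 * (flag 0x80000000) pointing at the external list at sg_address. A
 * standalone sketch of the decision; the embedded capacity here is an
 * assumed stand-in for HBA_MAX_SG_EMBEDDED:
 */
#if 0
#include <stdio.h>

#define MAX_EMBEDDED 28		/* assumed value, see HBA_MAX_SG_EMBEDDED */

int main(void)
{
	int nseg = 40;

	if (nseg <= MAX_EMBEDDED)
		printf("embed all %d elements in the command IU\n", nseg);
	else
		printf("point sge[0] at an external list of %d elements\n",
		       nseg);
	return 0;
}
#endif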
4072
4073 #ifdef AAC_DETAILED_STATUS_INFO
4074
4075 struct aac_srb_status_info {
4076 u32 status;
4077 char *str;
4078 };
4079
4081 static struct aac_srb_status_info srb_status_info[] = {
4082 { SRB_STATUS_PENDING, "Pending Status"},
4083 { SRB_STATUS_SUCCESS, "Success"},
4084 { SRB_STATUS_ABORTED, "Aborted Command"},
4085 { SRB_STATUS_ABORT_FAILED, "Abort Failed"},
4086 { SRB_STATUS_ERROR, "Error Event"},
4087 { SRB_STATUS_BUSY, "Device Busy"},
4088 { SRB_STATUS_INVALID_REQUEST, "Invalid Request"},
4089 { SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"},
4090 { SRB_STATUS_NO_DEVICE, "No Device"},
4091 { SRB_STATUS_TIMEOUT, "Timeout"},
4092 { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
4093 { SRB_STATUS_COMMAND_TIMEOUT, "Command Timeout"},
4094 { SRB_STATUS_MESSAGE_REJECTED, "Message Rejected"},
4095 { SRB_STATUS_BUS_RESET, "Bus Reset"},
4096 { SRB_STATUS_PARITY_ERROR, "Parity Error"},
4097 { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
4098 { SRB_STATUS_NO_HBA, "No HBA"},
4099 { SRB_STATUS_DATA_OVERRUN, "Data Overrun/Data Underrun"},
4100 { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
4101 { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
4102 { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
4103 { SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"},
4104 { SRB_STATUS_DELAYED_RETRY, "Delayed Retry"},
4105 { SRB_STATUS_INVALID_LUN, "Invalid LUN"},
4106 { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
4107 { SRB_STATUS_BAD_FUNCTION, "Bad Function"},
4108 { SRB_STATUS_ERROR_RECOVERY, "Error Recovery"},
4109 { SRB_STATUS_NOT_STARTED, "Not Started"},
4110 { SRB_STATUS_NOT_IN_USE, "Not In Use"},
4111 { SRB_STATUS_FORCE_ABORT, "Force Abort"},
4112 { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
4113 { 0xff, "Unknown Error"}
4114 };
4115
4116 char *aac_get_status_string(u32 status)
4117 {
4118 int i;
4119
4120 for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
4121 if (srb_status_info[i].status == status)
4122 return srb_status_info[i].str;
4123
4124 return "Bad Status Code";
4125 }
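/*
 * Editor's illustrative sketch, not part of the driver: the lookup
 * above is a plain linear scan over the status table; a standalone
 * analogue with a reduced, illustrative table:
 */
#if 0
#include <stdio.h>

struct status_info { unsigned int status; const char *str; };

static const struct status_info table[] = {
	{ 0x00, "Pending Status" },
	{ 0x01, "Success" },
	{ 0xff, "Unknown Error" },
};

static const char *status_string(unsigned int status)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].status == status)
			return table[i].str;
	return "Bad Status Code";
}

int main(void)
{
	printf("%s\n", status_string(0x01));	/* prints "Success" */
	return 0;
}
#endif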
4126
4127 #endif