/*
 * Marvell UMI driver
 *
 * Copyright 2011 Marvell. <jyli@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

#include "mvumi.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");

static const struct pci_device_id mvumi_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);

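/*
 * The tag pool is a simple LIFO stack of free tag values: tag_init() fills
 * the stack with every tag, tag_get_one() pops and tag_release_one() pushes.
 * There is no locking of its own; callers appear to rely on the host lock
 * that is held around the fire/complete paths in this driver.
 */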
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;
	BUG_ON(size != st->size);
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;
}

static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	BUG_ON(st->top <= 0);
	return st->stack[--st->top];
}

static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
						unsigned short tag)
{
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;
}

static bool tag_is_empty(struct mvumi_tag *st)
{
	return st->top == 0;
}

static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
								addr_array[i])
			pci_iounmap(dev, addr_array[i]);
}

static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
			if (!addr_array[i]) {
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
									i);
				mvumi_unmap_pci_addr(dev, addr_array);
				return -ENOMEM;
			}
		} else
			addr_array[i] = NULL;

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
	}

	return 0;
}

static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	switch (type) {
	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory, size = %d.\n",
				size);
			kfree(res);
			return NULL;
		}
		break;

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
						       &res->bus_addr);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem, size = %d.\n",
				size);
			kfree(res);
			return NULL;
		}
		break;

	default:
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		kfree(res);
		return NULL;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;
}

static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		switch (res->type) {
		case RESOURCE_UNCACHED_MEMORY:
			pci_free_consistent(mhba->pdev, res->size,
						res->virt_addr, res->bus_addr);
			break;
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			break;
		default:
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
			break;
		}
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}

/**
 * mvumi_make_sgl - Prepares SGL
 * @mhba: Adapter soft state
 * @scmd: SCSI command from the mid-layer
 * @sgl_p: SGL to be filled in
 * @sg_count: returns the number of SG elements
 *
 * If successful, this function returns 0. Otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;

	if (sgnum) {
		sg = scsi_sglist(scmd);
		*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
				(int) scmd->sc_data_direction);
		if (*sg_count > mhba->max_sge) {
			dev_err(&mhba->pdev->dev,
				"sg count[0x%x] is bigger than max sg[0x%x].\n",
				*sg_count, mhba->max_sge);
			return -1;
		}
		for (i = 0; i < *sg_count; i++) {
			busaddr = sg_dma_address(&sg[i]);
			m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
			m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
			m_sg->flags = 0;
			sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
			if ((i + 1) == *sg_count)
				m_sg->flags |= 1U << mhba->eot_flag;

			sgd_inc(mhba, m_sg);
		}
	} else {
		scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
			pci_map_single(mhba->pdev, scsi_sglist(scmd),
				scsi_bufflen(scmd),
				(int) scmd->sc_data_direction)
			: 0;
		busaddr = scmd->SCp.dma_handle;
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = 1U << mhba->eot_flag;
		sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
		*sg_count = 1;
	}

	return 0;
}

static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
							unsigned int size)
{
	struct mvumi_sgl *m_sg;
	void *virt_addr;
	dma_addr_t phy_addr;

	if (size == 0)
		return 0;

	virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
	if (!virt_addr)
		return -1;

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, m_sg, cpu_to_le32(size));

	return 0;
}

static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
				unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = pci_alloc_consistent(mhba->pdev,
				mhba->ib_max_size, &cmd->frame_phys);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for FW frame, size = %d.\n",
			mhba->ib_max_size);
		kfree(cmd);
		return NULL;
	}

	if (buf_size) {
		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
			dev_err(&mhba->pdev->dev,
				"failed to allocate memory for internal frame\n");
			pci_free_consistent(mhba->pdev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
			kfree(cmd);
			return NULL;
		}
	} else
		cmd->frame->sg_counts = 0;

	return cmd;
}

static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);

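			/*
			 * Rebuild the DMA address from its two 32-bit
			 * halves.  The high half is shifted in two 16-bit
			 * steps, a common idiom for avoiding an
			 * undefined-behaviour 32-bit shift when the operand
			 * type is only 32 bits wide.
			 */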
			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
								phy_addr);
		}
		pci_free_consistent(mhba->pdev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
		kfree(cmd);
	}
}

/**
 * mvumi_get_cmd - Get a command from the free pool
 * @mhba: Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_entry((&mhba->cmd_pool)->next,
				struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}

/**
 * mvumi_return_cmd - Return a cmd to free command pool
 * @mhba: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	cmd->scmd = NULL;
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}

/**
 * mvumi_free_cmds - Free all the cmds in the free cmd pool
 * @mhba: Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
							queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
}

/**
 * mvumi_alloc_cmds - Allocates the command packets
 * @mhba: Adapter soft state
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	int i;
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
		} else
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
			"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
						queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
	return -ENOMEM;
}

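/*
 * Inbound/outbound list pointers pack a slot number and a toggle bit into
 * one register value (cl_slot_num_mask / cl_pointer_toggle).  The toggle bit
 * flips each time a pointer wraps around the ring, which lets the driver and
 * the firmware tell a completely full ring apart from a completely empty one.
 */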
static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;

	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			!= (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return 0;
	}
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return 0;
	} else {
		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
	}
}

static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
{
	unsigned int count;
	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
		return 0;
	count = ioread32(mhba->ib_shadow);
	if (count == 0xffff)
		return 0;
	return count;
}

static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int cur_ib_entry;

	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	}
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
	} else {
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	}
	atomic_inc(&mhba->fw_outstanding);
}

static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
}

static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;

	udelay(1);
	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
						mhba->request_id_enabled) {
		dev_err(&mhba->pdev->dev,
			"request ID from FW:0x%x, cmd request ID:0x%x\n",
			request_id, mhba->tag_cmd[tag]->request_id);
		return -1;
	}

	return 0;
}

static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;

	do {
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);

	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;

	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;
	}
	return 0;
}

static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}

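/*
 * Drain the outbound (completion) list.  Each valid response frame is copied
 * out of the hardware ring into a cached mvumi_ob_data buffer and queued on
 * free_ob_list, presumably so the ring slot can be handed back to the
 * firmware quickly; mvumi_handle_clob() later completes the copied frames.
 */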
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
		return;

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		cur_obf++;
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
		}

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/* Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
				mhba->tag_cmd[p_outb_frame->tag] == NULL ||
				p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
				continue;

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = (struct mvumi_ob_data *)
				list_first_entry(&mhba->ob_data_list,
					struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
		} else {
			ob_data = NULL;
			if (cur_obf == 0) {
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
			} else
				cur_obf -= 1;
			break;
		}

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	}
	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
}

static void mvumi_reset(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->enpointa_mask_reg);
	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
		return;

	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
}

static unsigned char mvumi_start(struct mvumi_hba *mhba);

static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;
	mvumi_reset(mhba);

	if (mvumi_start(mhba))
		return FAILED;
	else
		return SUCCESS;
}

static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;
	u32 tmp;
	unsigned long before;
	before = jiffies;

	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);
			return FAILED;
		}

		msleep(500);
		rmb();
		tmp = ioread32(regs->arm_to_pciea_msg1);
	}

	return SUCCESS;
}

static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
						&mhba->pci_base[i]);
	}
}

static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
						mhba->pci_base[i]);
	}
}

static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
{
	unsigned int ret = 0;
	pci_set_master(pdev);

	if (IS_DMA64) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	} else
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	return ret;
}

static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	msleep(100);
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
		return FAILED;
	}
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
		return FAILED;
	}
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)
		return FAILED;

	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
{
	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
			scmd->serial_number, scmd->cmnd[0], scmd->retries);

	return mhba->instancet->reset_host(mhba);
}

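/*
 * Fire an internal command and wait (with timeout) for the ISR to complete
 * it; cmd->sync_cmd flags the command as synchronous.  On timeout the tag
 * is reclaimed and the command is unlinked here, under the host lock, so a
 * late completion finds no stale tag entry.
 */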
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	unsigned long flags;

	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
						atomic_read(&cmd->sync_cmd));
		BUG_ON(1);
		return -1;
	}
	atomic_inc(&cmd->sync_cmd);
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
		if (mhba->tag_cmd[cmd->frame->tag]) {
			mhba->tag_cmd[cmd->frame->tag] = NULL;
			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
							cmd->frame->tag);
			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
		}
		if (!list_empty(&cmd->queue_pointer)) {
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT: an internal command was never sent!\n");
			list_del_init(&cmd->queue_pointer);
		} else
			atomic_dec(&mhba->fw_outstanding);

		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	}
	return 0;
}

static void mvumi_release_fw(struct mvumi_hba *mhba)
{
	mvumi_free_cmds(mhba);
	mvumi_release_mem_resource(mhba);
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
	kfree(mhba->regs);
	pci_release_regions(mhba->pdev);
}

static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;
	unsigned char device_id, retry = 0;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
		if (!(mhba->target_map[device_id / bitcount] &
				(1 << (device_id % bitcount))))
			continue;
get_cmd:
		cmd = mvumi_create_internal_cmd(mhba, 0);
		if (!cmd) {
			if (retry++ >= 5) {
				dev_err(&mhba->pdev->dev,
					"failed to get memory for internal flush cache cmd for device %d",
					device_id);
				retry = 0;
				continue;
			} else
				goto get_cmd;
		}
		cmd->scmd = NULL;
		cmd->cmd_status = REQ_STATUS_PENDING;
		atomic_set(&cmd->sync_cmd, 0);
		frame = cmd->frame;
		frame->req_function = CL_FUN_SCSI_CMD;
		frame->device_id = device_id;
		frame->cmd_flag = CMD_FLAG_NON_DATA;
		frame->data_transfer_length = 0;
		frame->cdb_length = MAX_COMMAND_SIZE;
		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
		frame->cdb[1] = CDB_CORE_MODULE;
		frame->cdb[2] = CDB_CORE_SHUTDOWN;

		mvumi_issue_blocked_cmd(mhba, cmd);
		if (cmd->cmd_status != SAM_STAT_GOOD) {
			dev_err(&mhba->pdev->dev,
				"device %d flush cache failed, status=0x%x.\n",
				device_id, cmd->cmd_status);
		}

		mvumi_delete_internal_cmd(mhba, cmd);
	}
	return 0;
}

static unsigned char
mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
						unsigned short len)
{
	unsigned char *ptr;
	unsigned char ret = 0, i;

	ptr = (unsigned char *) p_header->frame_content;
	for (i = 0; i < len; i++) {
		ret ^= *ptr;
		ptr++;
	}

	return ret;
}

static void mvumi_hs_build_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page2 *hs_page2;
	struct mvumi_hs_page4 *hs_page4;
	struct mvumi_hs_page3 *hs_page3;
	struct timeval time;
	unsigned int local_time;

	switch (hs_header->page_code) {
	case HS_PAGE_HOST_INFO:
		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page2) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page2->host_type = 3;	/* 3 means Linux */
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
			hs_page2->host_cap = 0x08;	/* host dynamic source mode */
		hs_page2->host_ver.ver_major = VER_MAJOR;
		hs_page2->host_ver.ver_minor = VER_MINOR;
		hs_page2->host_ver.ver_oem = VER_OEM;
		hs_page2->host_ver.ver_build = VER_BUILD;
		hs_page2->system_io_bus = 0;
		hs_page2->slot_number = 0;
		hs_page2->intr_level = 0;
		hs_page2->intr_vector = 0;
		do_gettimeofday(&time);
		local_time = (unsigned int) (time.tv_sec -
						(sys_tz.tz_minuteswest * 60));
		hs_page2->seconds_since1970 = local_time;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_FIRM_CTL:
		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page3) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_CL_INFO:
		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page4) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);

		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
		if (mhba->hba_capability
			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
			hs_page4->ob_depth = find_first_bit((unsigned long *)
							&mhba->list_num_io,
							BITS_PER_LONG);
			hs_page4->ib_depth = find_first_bit((unsigned long *)
							&mhba->list_num_io,
							BITS_PER_LONG);
		} else {
			hs_page4->ob_depth = (u8) mhba->list_num_io;
			hs_page4->ib_depth = (u8) mhba->list_num_io;
		}
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	default:
		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
			hs_header->page_code);
		break;
	}
}

/**
 * mvumi_init_data - Initialize requested data for FW
 * @mhba: Adapter soft state
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
{
	struct mvumi_ob_data *ob_pool;
	struct mvumi_res *res_mgnt;
	unsigned int tmp_size, offset, i;
	void *virmem, *v;
	dma_addr_t p;

	if (mhba->fw_flag & MVUMI_FW_ALLOC)
		return 0;

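	/*
	 * One uncached DMA allocation is carved up below into the inbound
	 * list (plus, in dynamic-source mode, the per-command frames), the
	 * inbound shadow, the outbound shadow and the outbound list, with
	 * round_up() padding providing the alignment each piece needs.
	 */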
	tmp_size = mhba->ib_max_size * mhba->max_io;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;

	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
	tmp_size += 8 + sizeof(u32)*2 + 16;

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_UNCACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for inbound list\n");
		goto fail_alloc_dma_buf;
	}

	p = res_mgnt->bus_addr;
	v = res_mgnt->virt_addr;
	/* ib_list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;
	mhba->ib_list = v;
	mhba->ib_list_phys = p;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		mhba->ib_frame = v;
		mhba->ib_frame_phys = p;
	}
	v += mhba->ib_max_size * mhba->max_io;
	p += mhba->ib_max_size * mhba->max_io;

	/* ib shadow */
	offset = round_up(p, 8) - p;
	p += offset;
	v += offset;
	mhba->ib_shadow = v;
	mhba->ib_shadow_phys = p;
	p += sizeof(u32)*2;
	v += sizeof(u32)*2;
	/* ob shadow */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
		offset = round_up(p, 8) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 8;
		v += 8;
	} else {
		offset = round_up(p, 4) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 4;
		v += 4;
	}

	/* ob list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;

	mhba->ob_list = v;
	mhba->ob_list_phys = p;

	/* ob data pool */
	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
	tmp_size = round_up(tmp_size, 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for outbound data buffer\n");
		goto fail_alloc_dma_buf;
	}
	virmem = res_mgnt->virt_addr;

	for (i = mhba->max_io; i != 0; i--) {
		ob_pool = (struct mvumi_ob_data *) virmem;
		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
		virmem += mhba->ob_max_size + sizeof(*ob_pool);
	}

	tmp_size = sizeof(unsigned short) * mhba->max_io +
				sizeof(struct mvumi_cmd *) * mhba->max_io;
	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
						(sizeof(unsigned char) * 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for tag and target map\n");
		goto fail_alloc_dma_buf;
	}

	virmem = res_mgnt->virt_addr;
	mhba->tag_pool.stack = virmem;
	mhba->tag_pool.size = mhba->max_io;
	tag_init(&mhba->tag_pool, mhba->max_io);
	virmem += sizeof(unsigned short) * mhba->max_io;

	mhba->tag_cmd = virmem;
	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

	mhba->target_map = virmem;

	mhba->fw_flag |= MVUMI_FW_ALLOC;
	return 0;

fail_alloc_dma_buf:
	mvumi_release_mem_resource(mhba);
	return -1;
}

static int mvumi_hs_process_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page1 *hs_page1;
	unsigned char page_checksum;

	page_checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
	if (page_checksum != hs_header->checksum) {
		dev_err(&mhba->pdev->dev, "checksum error\n");
		return -1;
	}

	switch (hs_header->page_code) {
	case HS_PAGE_FIRM_CAP:
		hs_page1 = (struct mvumi_hs_page1 *) hs_header;

		mhba->max_io = hs_page1->max_io_support;
		mhba->list_num_io = hs_page1->cl_inout_list_depth;
		mhba->max_transfer_size = hs_page1->max_transfer_size;
		mhba->max_target_id = hs_page1->max_devices_support;
		mhba->hba_capability = hs_page1->capability;
		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;

		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
						hs_page1->fw_ver.ver_build);

		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
			mhba->eot_flag = 22;
		else
			mhba->eot_flag = 27;
		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
		break;
	default:
		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
		return -1;
	}
	return 0;
}

/**
 * mvumi_handshake - Move the FW to READY state
 * @mhba: Adapter soft state
 *
 * During initialization, the FW can be in any one of several possible
 * states.  If the FW is in an operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state.  Otherwise,
 * it has to wait for the ready state.
 */
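/*
 * Handshake state machine, as driven below: HS_S_START -> HS_S_RESET ->
 * HS_S_PAGE_ADDR, then HS_S_QUERY_PAGE / HS_S_SEND_PAGE repeat once per
 * handshake page until every page is exchanged, ending in HS_S_END.
 */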
static int mvumi_handshake(struct mvumi_hba *mhba)
{
	unsigned int hs_state, tmp, hs_fun;
	struct mvumi_hs_header *hs_header;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->fw_state == FW_STATE_STARTING)
		hs_state = HS_S_START;
	else {
		tmp = ioread32(regs->arm_to_pciea_msg0);
		hs_state = HS_GET_STATE(tmp);
		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
			mhba->fw_state = FW_STATE_STARTING;
			return -1;
		}
	}

	hs_fun = 0;
	switch (hs_state) {
	case HS_S_START:
		mhba->fw_state = FW_STATE_HANDSHAKING;
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_RESET);
		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_RESET:
		iowrite32(lower_32_bits(mhba->handshake_page_phys),
					regs->pciea_to_arm_msg1);
		iowrite32(upper_32_bits(mhba->handshake_page_phys),
					regs->arm_to_pciea_msg1);
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_PAGE_ADDR:
	case HS_S_QUERY_PAGE:
	case HS_S_SEND_PAGE:
		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
			mhba->hba_total_pages =
			((struct mvumi_hs_page1 *) hs_header)->total_pages;

			if (mhba->hba_total_pages == 0)
				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if (hs_state == HS_S_QUERY_PAGE) {
			if (mvumi_hs_process_page(mhba, hs_header)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
			if (mvumi_init_data(mhba)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
		} else if (hs_state == HS_S_PAGE_ADDR) {
			hs_header->page_code = 0;
			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
			hs_header->page_code++;
			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
				mvumi_hs_build_page(mhba, hs_header);
				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
			} else
				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
		} else
			HS_SET_STATE(hs_fun, HS_S_END);

		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_END:
		/* Set communication list ISR */
		tmp = ioread32(regs->enpointa_mask_reg);
		tmp |= regs->int_comaout | regs->int_comaerr;
		iowrite32(tmp, regs->enpointa_mask_reg);
		iowrite32(mhba->list_num_io, mhba->ib_shadow);
		/* Set InBound List Available count shadow */
		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_basel);
		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_baseh);

		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
			/* Set OutBound List Available count shadow */
			iowrite32((mhba->list_num_io-1) |
							regs->cl_pointer_toggle,
							mhba->ob_shadow);
			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_basel);
			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_baseh);
		}

		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
							regs->cl_pointer_toggle;
		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
							regs->cl_pointer_toggle;
		mhba->fw_state = FW_STATE_STARTED;

		break;
	default:
		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
								hs_state);
		return -1;
	}
	return 0;
}

static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
{
	unsigned int isr_status;
	unsigned long before;

	before = jiffies;
	mvumi_handshake(mhba);
	do {
		isr_status = mhba->instancet->read_fw_status_reg(mhba);

		if (mhba->fw_state == FW_STATE_STARTED)
			return 0;
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"no handshake response at state 0x%x.\n",
				mhba->fw_state);
			dev_err(&mhba->pdev->dev,
				"isr : global=0x%x,status=0x%x.\n",
				mhba->global_isr, isr_status);
			return -1;
		}
		rmb();
		usleep_range(1000, 2000);
	} while (!(isr_status & DRBL_HANDSHAKE_ISR));

	return 0;
}

static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	unsigned long before;

	before = jiffies;
	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
		if (tmp != HANDSHAKE_READYSTATE)
			iowrite32(DRBL_MU_RESET,
					mhba->regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"invalid signature [0x%x].\n", tmp);
			return -1;
		}
		usleep_range(1000, 2000);
		rmb();
		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	}

	mhba->fw_state = FW_STATE_STARTING;
	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
	do {
		if (mvumi_handshake_event(mhba)) {
			dev_err(&mhba->pdev->dev,
					"handshake failed at state 0x%x.\n",
					mhba->fw_state);
			return -1;
		}
	} while (mhba->fw_state != FW_STATE_STARTED);

	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");

	return 0;
}

static unsigned char mvumi_start(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	struct mvumi_hw_regs *regs = mhba->regs;

	/* clear Door bell */
	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);

	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
	iowrite32(tmp, regs->enpointa_mask_reg);
	msleep(100);
	if (mvumi_check_handshake(mhba))
		return -1;

	return 0;
}

/**
 * mvumi_complete_cmd - Completes a command
 * @mhba: Adapter soft state
 * @cmd: Command to be completed
 */
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	struct scsi_cmnd *scmd = cmd->scmd;

	cmd->scmd->SCp.ptr = NULL;
	scmd->result = ob_frame->req_status;

	switch (ob_frame->req_status) {
	case SAM_STAT_GOOD:
		scmd->result |= DID_OK << 16;
		break;
	case SAM_STAT_BUSY:
		scmd->result |= DID_BUS_BUSY << 16;
		break;
	case SAM_STAT_CHECK_CONDITION:
		scmd->result |= (DID_OK << 16);
		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
			scmd->result |= (DRIVER_SENSE << 24);
		}
		break;
	default:
		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
		break;
	}

	if (scsi_bufflen(scmd)) {
		if (scsi_sg_count(scmd)) {
			pci_unmap_sg(mhba->pdev,
				scsi_sglist(scmd),
				scsi_sg_count(scmd),
				(int) scmd->sc_data_direction);
		} else {
			pci_unmap_single(mhba->pdev,
				scmd->SCp.dma_handle,
				scsi_bufflen(scmd),
				(int) scmd->sc_data_direction);

			scmd->SCp.dma_handle = 0;
		}
	}
	cmd->scmd->scsi_done(scmd);
	mvumi_return_cmd(mhba, cmd);
}

static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	if (atomic_read(&cmd->sync_cmd)) {
		cmd->cmd_status = ob_frame->req_status;

		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
				cmd->data_buf) {
			memcpy(cmd->data_buf, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
		}
		atomic_dec(&cmd->sync_cmd);
		wake_up(&mhba->int_cmd_wait_q);
	}
}

static void mvumi_show_event(struct mvumi_hba *mhba,
			struct mvumi_driver_event *ptr)
{
	unsigned int i;

	dev_warn(&mhba->pdev->dev,
		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
	if (ptr->param_count) {
		printk(KERN_WARNING "Event param(len 0x%x): ",
						ptr->param_count);
		for (i = 0; i < ptr->param_count; i++)
			printk(KERN_WARNING "0x%x ", ptr->params[i]);

		printk(KERN_WARNING "\n");
	}

	if (ptr->sense_data_length) {
		printk(KERN_WARNING "Event sense data(len 0x%x): ",
						ptr->sense_data_length);
		for (i = 0; i < ptr->sense_data_length; i++)
			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
		printk(KERN_WARNING "\n");
	}
}

static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
{
	struct scsi_device *sdev;
	int ret = -1;

	if (status == DEVICE_OFFLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (sdev) {
			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
								sdev->id, 0);
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
			ret = 0;
		} else
			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
									devid);
	} else if (status == DEVICE_ONLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (!sdev) {
			scsi_add_device(mhba->shost, 0, devid, 0);
			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
								devid, 0);
			ret = 0;
		} else {
			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
								0, devid, 0);
			scsi_device_put(sdev);
		}
	}
	return ret;
}

static u64 mvumi_inquiry(struct mvumi_hba *mhba,
	unsigned int id, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *frame;
	u64 wwid = 0;
	int cmd_alloc = 0;
	int data_buf_len = 64;

	if (!cmd) {
		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
		if (cmd)
			cmd_alloc = 1;
		else
			return 0;
	} else {
		memset(cmd->data_buf, 0, data_buf_len);
	}
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame = cmd->frame;
	frame->device_id = (u16) id;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = 6;
	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
	memset(frame->cdb, 0, frame->cdb_length);
	frame->cdb[0] = INQUIRY;
	frame->cdb[4] = frame->data_transfer_length;

	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status == SAM_STAT_GOOD) {
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
			wwid = id + 1;
		else
			memcpy((void *)&wwid,
			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
			       MVUMI_INQUIRY_UUID_LEN);
		dev_dbg(&mhba->pdev->dev,
			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
	} else {
		wwid = 0;
	}
	if (cmd_alloc)
		mvumi_delete_internal_cmd(mhba, cmd);

	return wwid;
}

static void mvumi_detach_devices(struct mvumi_hba *mhba)
{
	struct mvumi_device *mv_dev = NULL, *dev_next;
	struct scsi_device *sdev = NULL;

	mutex_lock(&mhba->device_lock);

	/* detach Hard Disk */
	list_for_each_entry_safe(mv_dev, dev_next,
		&mhba->shost_dev_list, list) {
		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}
	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}

	/* detach virtual device */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}

	mutex_unlock(&mhba->device_lock);
}

static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
{
	struct scsi_device *sdev;

	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
	if (sdev) {
		scsi_rescan_device(&sdev->sdev_gendev);
		scsi_device_put(sdev);
	}
}

static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
{
	struct mvumi_device *mv_dev = NULL;

	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
		if (mv_dev->wwid == wwid) {
			if (mv_dev->id != id) {
				dev_err(&mhba->pdev->dev,
					"%s has same wwid[%llx], but different id[%d %d]\n",
					__func__, mv_dev->wwid, mv_dev->id, id);
				return -1;
			} else {
				if (mhba->pdev->device ==
						PCI_DEVICE_ID_MARVELL_MV9143)
					mvumi_rescan_devices(mhba, id);
				return 1;
			}
		}
	}
	return 0;
}

static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
{
	struct mvumi_device *mv_dev = NULL, *dev_next;

	list_for_each_entry_safe(mv_dev, dev_next,
				&mhba->shost_dev_list, list) {
		if (mv_dev->id == id) {
			dev_dbg(&mhba->pdev->dev,
				"detach device(0:%d:0) wwid(%llx) from HOST\n",
				mv_dev->id, mv_dev->wwid);
			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
			list_del_init(&mv_dev->list);
			kfree(mv_dev);
		}
	}
}

static int mvumi_probe_devices(struct mvumi_hba *mhba)
{
	int id, maxid;
	u64 wwid = 0;
	struct mvumi_device *mv_dev = NULL;
	struct mvumi_cmd *cmd = NULL;
	int found = 0;

	cmd = mvumi_create_internal_cmd(mhba, 64);
	if (!cmd)
		return -1;

	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
		maxid = mhba->max_target_id;
	else
		maxid = mhba->max_target_id - 1;

	for (id = 0; id < maxid; id++) {
		wwid = mvumi_inquiry(mhba, id, cmd);
		if (!wwid) {
			/* device no response, remove it */
			mvumi_remove_devices(mhba, id);
		} else {
			/* device response, add it */
			found = mvumi_match_devices(mhba, id, wwid);
			if (!found) {
				mvumi_remove_devices(mhba, id);
				mv_dev = kzalloc(sizeof(struct mvumi_device),
								GFP_KERNEL);
				if (!mv_dev) {
					dev_err(&mhba->pdev->dev,
						"%s alloc mv_dev failed\n",
						__func__);
					continue;
				}
				mv_dev->id = id;
				mv_dev->wwid = wwid;
				mv_dev->sdev = NULL;
				INIT_LIST_HEAD(&mv_dev->list);
				list_add_tail(&mv_dev->list,
					      &mhba->mhba_dev_list);
				dev_dbg(&mhba->pdev->dev,
					"probe a new device(0:%d:0) wwid(%llx)\n",
					id, mv_dev->wwid);
			} else if (found == -1)
				return -1;
			else
				continue;
		}
	}

	if (cmd)
		mvumi_delete_internal_cmd(mhba, cmd);

	return 0;
}

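/*
 * Hotplug scan thread (presumably mhba->dm_thread).  It sleeps until a
 * DRBL_BUS_CHANGE doorbell bumps pnp_count, then re-probes every target and
 * promotes newly discovered devices from mhba_dev_list to shost_dev_list
 * once the SCSI midlayer has accepted them.
 */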
static int mvumi_rescan_bus(void *data)
{
	int ret = 0;
	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
	struct mvumi_device *mv_dev = NULL, *dev_next;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&mhba->pnp_count))
			schedule();
		msleep(1000);
		atomic_set(&mhba->pnp_count, 0);
		__set_current_state(TASK_RUNNING);

		mutex_lock(&mhba->device_lock);
		ret = mvumi_probe_devices(mhba);
		if (!ret) {
			list_for_each_entry_safe(mv_dev, dev_next,
						 &mhba->mhba_dev_list, list) {
				if (mvumi_handle_hotplug(mhba, mv_dev->id,
							 DEVICE_ONLINE)) {
					dev_err(&mhba->pdev->dev,
						"%s add device(0:%d:0) failed, wwid(%llx) already exists\n",
						__func__,
						mv_dev->id, mv_dev->wwid);
					list_del_init(&mv_dev->list);
					kfree(mv_dev);
				} else {
					list_move_tail(&mv_dev->list,
						       &mhba->shost_dev_list);
				}
			}
		}
		mutex_unlock(&mhba->device_lock);
	}
	return 0;
}

static void mvumi_proc_msg(struct mvumi_hba *mhba,
					struct mvumi_hotplug_event *param)
{
	u16 size = param->size;
	const unsigned long *ar_bitmap;
	const unsigned long *re_bitmap;
	int index;

	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
		index = -1;
		ar_bitmap = (const unsigned long *) param->bitmap;
		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];

		mutex_lock(&mhba->sas_discovery_mutex);
		do {
			index = find_next_zero_bit(ar_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
		} while (1);

		index = -1;
		do {
			index = find_next_zero_bit(re_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
		} while (1);
		mutex_unlock(&mhba->sas_discovery_mutex);
	}
}

static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
{
	if (msg == APICDB1_EVENT_GETEVENT) {
		int i, count;
		struct mvumi_driver_event *param = NULL;
		struct mvumi_event_req *er = buffer;
		count = er->count;
		if (count > MAX_EVENTS_RETURNED) {
			dev_err(&mhba->pdev->dev,
				"event count[0x%x] is bigger than max event count[0x%x].\n",
				count, MAX_EVENTS_RETURNED);
			return;
		}
		for (i = 0; i < count; i++) {
			param = &er->events[i];
			mvumi_show_event(mhba, param);
		}
	} else if (msg == APICDB1_HOST_GETEVENT) {
		mvumi_proc_msg(mhba, buffer);
	}
}

static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;

	cmd = mvumi_create_internal_cmd(mhba, 512);
	if (!cmd)
		return -1;
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame = cmd->frame;
	frame->device_id = 0;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = MAX_COMMAND_SIZE;
	frame->data_transfer_length = sizeof(struct mvumi_event_req);
	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
	frame->cdb[0] = APICDB0_EVENT;
	frame->cdb[1] = msg;
	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status != SAM_STAT_GOOD)
		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
							cmd->cmd_status);
	else
		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);

	mvumi_delete_internal_cmd(mhba, cmd);
	return 0;
}

static void mvumi_scan_events(struct work_struct *work)
{
	struct mvumi_events_wq *mu_ev =
		container_of(work, struct mvumi_events_wq, work_q);

	mvumi_get_event(mu_ev->mhba, mu_ev->event);
	kfree(mu_ev);
}

static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
{
	struct mvumi_events_wq *mu_ev;

	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
		if (isr_status & DRBL_BUS_CHANGE) {
			atomic_inc(&mhba->pnp_count);
			wake_up_process(mhba->dm_thread);
			isr_status &= ~(DRBL_BUS_CHANGE);
			continue;
		}

		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
		if (mu_ev) {
			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
			mu_ev->mhba = mhba;
			mu_ev->event = APICDB1_EVENT_GETEVENT;
			isr_status &= ~(DRBL_EVENT_NOTIFY);
			mu_ev->param = NULL;
			schedule_work(&mu_ev->work_q);
		}
	}
}

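/*
 * Complete every response frame previously copied off the outbound ring:
 * hand the tag back, then finish the command via the SCSI done callback or,
 * for internal commands, by waking the blocked issuer; finally kick
 * fire_cmd() to drain any queued requests.
 */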
static void mvumi_handle_clob(struct mvumi_hba *mhba)
{
	struct mvumi_rsp_frame *ob_frame;
	struct mvumi_cmd *cmd;
	struct mvumi_ob_data *pool;

	while (!list_empty(&mhba->free_ob_list)) {
		pool = list_first_entry(&mhba->free_ob_list,
						struct mvumi_ob_data, list);
		list_del_init(&pool->list);
		list_add_tail(&pool->list, &mhba->ob_data_list);

		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
		cmd = mhba->tag_cmd[ob_frame->tag];

		atomic_dec(&mhba->fw_outstanding);
		mhba->tag_cmd[ob_frame->tag] = NULL;
		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
		if (cmd->scmd)
			mvumi_complete_cmd(mhba, cmd, ob_frame);
		else
			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
	}
	mhba->instancet->fire_cmd(mhba, NULL);
}

static irqreturn_t mvumi_isr_handler(int irq, void *devp)
{
	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);
	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
		return IRQ_NONE;
	}

	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
			mvumi_launch_events(mhba, mhba->isr_status);
		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
			mvumi_handshake(mhba);
		}
	}

	if (mhba->global_isr & mhba->regs->int_comaout)
		mvumi_receive_ob_list_entry(mhba);

	mhba->global_isr = 0;
	mhba->isr_status = 0;
	if (mhba->fw_state == FW_STATE_STARTED)
		mvumi_handle_clob(mhba);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	return IRQ_HANDLED;
}

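/*
 * Queue one command into the inbound list.  With dynamic source support the
 * ring entry is only a descriptor (address/length of the pre-allocated,
 * DMA-visible frame); otherwise the whole frame is memcpy'd into the ring
 * slot.  if_length is evidently in 32-bit words, hence the frame_len >> 2.
 */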
static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	void *ib_entry;
	struct mvumi_msg_frame *ib_frame;
	unsigned int frame_len;

	ib_frame = cmd->frame;
	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	}
	if (tag_is_empty(&mhba->tag_pool)) {
		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	}
	mvumi_get_ib_list_entry(mhba, &ib_entry);

	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
	cmd->frame->request_id = mhba->io_seq++;
	cmd->request_id = cmd->frame->request_id;
	mhba->tag_cmd[cmd->frame->tag] = cmd;
	frame_len = sizeof(*ib_frame) - 4 +
				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		struct mvumi_dyn_list_entry *dle;
		dle = ib_entry;
		dle->src_low_addr =
			cpu_to_le32(lower_32_bits(cmd->frame_phys));
		dle->src_high_addr =
			cpu_to_le32(upper_32_bits(cmd->frame_phys));
		dle->if_length = (frame_len >> 2) & 0xFFF;
	} else {
		memcpy(ib_entry, ib_frame, frame_len);
	}
	return MV_QUEUE_COMMAND_RESULT_SENT;
}

static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
{
	unsigned short num_of_cl_sent = 0;
	unsigned int count;
	enum mvumi_qc_result result;

	if (cmd)
		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
	count = mhba->instancet->check_ib_list(mhba);
	if (list_empty(&mhba->waiting_req_list) || !count)
		return;

	do {
		cmd = list_first_entry(&mhba->waiting_req_list,
				       struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
		result = mvumi_send_command(mhba, cmd);
		switch (result) {
		case MV_QUEUE_COMMAND_RESULT_SENT:
			num_of_cl_sent++;
			break;
		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
			if (num_of_cl_sent > 0)
				mvumi_send_ib_list_entry(mhba);

			return;
		}
	} while (!list_empty(&mhba->waiting_req_list) && count--);

	if (num_of_cl_sent > 0)
		mvumi_send_ib_list_entry(mhba);
}

1931 /**
1932 * mvumi_enable_intr - Enables interrupts
1933 * @mhba: Adapter soft state
1934 */
1935 static void mvumi_enable_intr(struct mvumi_hba *mhba)
1936 {
1937 unsigned int mask;
1938 struct mvumi_hw_regs *regs = mhba->regs;
1939
1940 iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1941 mask = ioread32(regs->enpointa_mask_reg);
1942 mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1943 iowrite32(mask, regs->enpointa_mask_reg);
1944 }
1945
1946 /**
1947 * mvumi_disable_intr - Disables interrupts
1948 * @mhba: Adapter soft state
1949 */
1950 static void mvumi_disable_intr(struct mvumi_hba *mhba)
1951 {
1952 unsigned int mask;
1953 struct mvumi_hw_regs *regs = mhba->regs;
1954
1955 iowrite32(0, regs->arm_to_pciea_mask_reg);
1956 mask = ioread32(regs->enpointa_mask_reg);
1957 mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1958 regs->int_comaerr);
1959 iowrite32(mask, regs->enpointa_mask_reg);
1960 }
1961
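/*
 * Read and acknowledge the pending interrupt causes. Returns 1 when the
 * interrupt does not belong to this adapter (the IRQ line is shared); a
 * read of all-ones usually means the device has dropped off the bus. On
 * success the raw causes are parked in mhba->global_isr/isr_status for
 * mvumi_isr_handler() to act on, and 0 is returned.
 */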
1962 static int mvumi_clear_intr(void *extend)
1963 {
1964 struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1965 unsigned int status, isr_status = 0, tmp = 0;
1966 struct mvumi_hw_regs *regs = mhba->regs;
1967
1968 status = ioread32(regs->main_int_cause_reg);
1969 if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1970 return 1;
1971 if (unlikely(status & regs->int_comaerr)) {
1972 tmp = ioread32(regs->outb_isr_cause);
1973 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1974 if (tmp & regs->clic_out_err) {
1975 iowrite32(tmp & regs->clic_out_err,
1976 regs->outb_isr_cause);
1977 }
1978 } else {
1979 if (tmp & (regs->clic_in_err | regs->clic_out_err))
1980 iowrite32(tmp & (regs->clic_in_err |
1981 regs->clic_out_err),
1982 regs->outb_isr_cause);
1983 }
1984 status ^= mhba->regs->int_comaerr;
1985 /* inbound or outbound parity error, command will timeout */
1986 }
1987 if (status & regs->int_comaout) {
1988 tmp = ioread32(regs->outb_isr_cause);
1989 if (tmp & regs->clic_irq)
1990 iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1991 }
1992 if (status & regs->int_dl_cpu2pciea) {
1993 isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1994 if (isr_status)
1995 iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1996 }
1997
1998 mhba->global_isr = status;
1999 mhba->isr_status = isr_status;
2000
2001 return 0;
2002 }
2003
2004 /**
2005 * mvumi_read_fw_status_reg - returns the current FW status value
2006 * @mhba: Adapter soft state
2007 */
2008 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
2009 {
2010 unsigned int status;
2011
2012 status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
2013 if (status)
2014 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
2015 return status;
2016 }
2017
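/*
 * Per-ASIC operation tables. The 9143 and 9580 share the generic
 * fire/interrupt helpers above but differ in how the inbound/outbound
 * lists are polled and how the controller is reset, so those entries are
 * chip-specific.
 */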
2018 static struct mvumi_instance_template mvumi_instance_9143 = {
2019 .fire_cmd = mvumi_fire_cmd,
2020 .enable_intr = mvumi_enable_intr,
2021 .disable_intr = mvumi_disable_intr,
2022 .clear_intr = mvumi_clear_intr,
2023 .read_fw_status_reg = mvumi_read_fw_status_reg,
2024 .check_ib_list = mvumi_check_ib_list_9143,
2025 .check_ob_list = mvumi_check_ob_list_9143,
2026 .reset_host = mvumi_reset_host_9143,
2027 };
2028
2029 static struct mvumi_instance_template mvumi_instance_9580 = {
2030 .fire_cmd = mvumi_fire_cmd,
2031 .enable_intr = mvumi_enable_intr,
2032 .disable_intr = mvumi_disable_intr,
2033 .clear_intr = mvumi_clear_intr,
2034 .read_fw_status_reg = mvumi_read_fw_status_reg,
2035 .check_ib_list = mvumi_check_ib_list_9580,
2036 .check_ob_list = mvumi_check_ob_list_9580,
2037 .reset_host = mvumi_reset_host_9580,
2038 };
2039
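/*
 * Record each discovered target in the target_map bitmap (one bit per
 * target id), which the driver's hot-plug/rescan handling appears to
 * compare against firmware-reported devices.
 */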
2040 static int mvumi_slave_configure(struct scsi_device *sdev)
2041 {
2042 struct mvumi_hba *mhba;
2043 unsigned char bitcount = sizeof(unsigned char) * 8;
2044
2045 mhba = (struct mvumi_hba *) sdev->host->hostdata;
2046 if (sdev->id >= mhba->max_target_id)
2047 return -EINVAL;
2048
2049 mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2050 return 0;
2051 }
2052
2053 /**
2054 * mvumi_build_frame - Prepares a direct cdb (DCDB) command
2055 * @mhba: Adapter soft state
2056 * @scmd: SCSI command
2057 * @cmd: Command in which the frame is prepared
2058 *
2059 * This function prepares CDB commands. These are typically pass-through
2060 * commands to the devices.
2061 */
2062 static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2063 struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2064 {
2065 struct mvumi_msg_frame *pframe;
2066
2067 cmd->scmd = scmd;
2068 cmd->cmd_status = REQ_STATUS_PENDING;
2069 pframe = cmd->frame;
2070 pframe->device_id = ((unsigned short) scmd->device->id) |
2071 (((unsigned short) scmd->device->lun) << 8);
2072 pframe->cmd_flag = 0;
2073
2074 switch (scmd->sc_data_direction) {
2075 case DMA_NONE:
2076 pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2077 break;
2078 case DMA_FROM_DEVICE:
2079 pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2080 break;
2081 case DMA_TO_DEVICE:
2082 pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2083 break;
2084 case DMA_BIDIRECTIONAL:
2085 default:
2086 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2087 "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2088 goto error;
2089 }
2090
2091 pframe->cdb_length = scmd->cmd_len;
2092 memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2093 pframe->req_function = CL_FUN_SCSI_CMD;
2094 if (scsi_bufflen(scmd)) {
2095 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2096 &pframe->sg_counts))
2097 goto error;
2098
2099 pframe->data_transfer_length = scsi_bufflen(scmd);
2100 } else {
2101 pframe->sg_counts = 0;
2102 pframe->data_transfer_length = 0;
2103 }
2104 return 0;
2105
2106 error:
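/*
 * Fail the command locally with ILLEGAL REQUEST / ASC 0x24 ("invalid
 * field in CDB") sense data. Note the function returns unsigned char, so
 * the -1 below reaches the caller as 0xFF; the caller only tests for
 * nonzero, so this works, if inelegantly.
 */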
2107 scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2108 SAM_STAT_CHECK_CONDITION;
2109 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2110 0);
2111 return -1;
2112 }
2113
2114 /**
2115 * mvumi_queue_command - Queue entry point
2116 * @shost: Scsi_Host to which the command is queued
2117 * @scmd: SCSI command to be queued
2118 */
2119 static int mvumi_queue_command(struct Scsi_Host *shost,
2120 struct scsi_cmnd *scmd)
2121 {
2122 struct mvumi_cmd *cmd;
2123 struct mvumi_hba *mhba;
2124 unsigned long irq_flags;
2125
2126 spin_lock_irqsave(shost->host_lock, irq_flags);
2127 scsi_cmd_get_serial(shost, scmd);
2128
2129 mhba = (struct mvumi_hba *) shost->hostdata;
2130 scmd->result = 0;
2131 cmd = mvumi_get_cmd(mhba);
2132 if (unlikely(!cmd)) {
2133 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2134 return SCSI_MLQUEUE_HOST_BUSY;
2135 }
2136
2137 if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2138 goto out_return_cmd;
2139
2140 cmd->scmd = scmd;
2141 scmd->SCp.ptr = (char *) cmd;
2142 mhba->instancet->fire_cmd(mhba, cmd);
2143 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2144 return 0;
2145
2146 out_return_cmd:
2147 mvumi_return_cmd(mhba, cmd);
2148 scmd->scsi_done(scmd);
2149 spin_unlock_irqrestore(shost->host_lock, irq_flags);
2150 return 0;
2151 }
2152
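/*
 * Block-layer timeout hook, wired up through the transport template
 * below. The command is torn down on the spot: its tag and any DMA
 * mappings are reclaimed, the scmd is failed with DID_ABORT, and
 * BLK_EH_NOT_HANDLED lets the SCSI error handler carry on.
 */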
2153 static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2154 {
2155 struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2156 struct Scsi_Host *host = scmd->device->host;
2157 struct mvumi_hba *mhba = shost_priv(host);
2158 unsigned long flags;
2159
2160 spin_lock_irqsave(mhba->shost->host_lock, flags);
2161
2162 if (mhba->tag_cmd[cmd->frame->tag]) {
2163 mhba->tag_cmd[cmd->frame->tag] = NULL;
2164 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2165 }
2166 if (!list_empty(&cmd->queue_pointer))
2167 list_del_init(&cmd->queue_pointer);
2168 else
2169 atomic_dec(&mhba->fw_outstanding);
2170
2171 scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2172 scmd->SCp.ptr = NULL;
2173 if (scsi_bufflen(scmd)) {
2174 if (scsi_sg_count(scmd)) {
2175 pci_unmap_sg(mhba->pdev,
2176 scsi_sglist(scmd),
2177 scsi_sg_count(scmd),
2178 (int)scmd->sc_data_direction);
2179 } else {
2180 pci_unmap_single(mhba->pdev,
2181 scmd->SCp.dma_handle,
2182 scsi_bufflen(scmd),
2183 (int)scmd->sc_data_direction);
2184
2185 scmd->SCp.dma_handle = 0;
2186 }
2187 }
2188 mvumi_return_cmd(mhba, cmd);
2189 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2190
2191 return BLK_EH_NOT_HANDLED;
2192 }
2193
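/*
 * Fake a disk geometry for the BIOS: 64 heads x 32 sectors for small
 * disks, switching to 255 x 63 once the capacity reaches 0x200000
 * sectors (1 GiB at 512-byte sectors) so the cylinder count stays
 * representable.
 */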
2194 static int
2195 mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2196 sector_t capacity, int geom[])
2197 {
2198 int heads, sectors;
2199 sector_t cylinders;
2200 unsigned long tmp;
2201
2202 heads = 64;
2203 sectors = 32;
2204 tmp = heads * sectors;
2205 cylinders = capacity;
2206 sector_div(cylinders, tmp);
2207
2208 if (capacity >= 0x200000) {
2209 heads = 255;
2210 sectors = 63;
2211 tmp = heads * sectors;
2212 cylinders = capacity;
2213 sector_div(cylinders, tmp);
2214 }
2215 geom[0] = heads;
2216 geom[1] = sectors;
2217 geom[2] = cylinders;
2218
2219 return 0;
2220 }
2221
2222 static struct scsi_host_template mvumi_template = {
2224 .module = THIS_MODULE,
2225 .name = "Marvell Storage Controller",
2226 .slave_configure = mvumi_slave_configure,
2227 .queuecommand = mvumi_queue_command,
2228 .eh_host_reset_handler = mvumi_host_reset,
2229 .bios_param = mvumi_bios_param,
2230 .this_id = -1,
2231 };
2232
2233 static struct scsi_transport_template mvumi_transport_template = {
2234 .eh_timed_out = mvumi_timed_out,
2235 };
2236
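/*
 * Fill in the per-chip register map. The two supported ASICs expose the
 * message unit through different BARs (BAR0 on the 9143, BAR2 on the
 * 9580) and at different offsets, so everything downstream goes through
 * this mvumi_hw_regs indirection instead of hard-coded addresses.
 */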
2237 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2238 {
2239 void *base = NULL;
2240 struct mvumi_hw_regs *regs;
2241
2242 switch (mhba->pdev->device) {
2243 case PCI_DEVICE_ID_MARVELL_MV9143:
2244 mhba->mmio = mhba->base_addr[0];
2245 base = mhba->mmio;
2246 if (!mhba->regs) {
2247 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2248 if (mhba->regs == NULL)
2249 return -ENOMEM;
2250 }
2251 regs = mhba->regs;
2252
2253 /* For Arm */
2254 regs->ctrl_sts_reg = base + 0x20104;
2255 regs->rstoutn_mask_reg = base + 0x20108;
2256 regs->sys_soft_rst_reg = base + 0x2010C;
2257 regs->main_int_cause_reg = base + 0x20200;
2258 regs->enpointa_mask_reg = base + 0x2020C;
2259 regs->rstoutn_en_reg = base + 0xF1400;
2260 /* For Doorbell */
2261 regs->pciea_to_arm_drbl_reg = base + 0x20400;
2262 regs->arm_to_pciea_drbl_reg = base + 0x20408;
2263 regs->arm_to_pciea_mask_reg = base + 0x2040C;
2264 regs->pciea_to_arm_msg0 = base + 0x20430;
2265 regs->pciea_to_arm_msg1 = base + 0x20434;
2266 regs->arm_to_pciea_msg0 = base + 0x20438;
2267 regs->arm_to_pciea_msg1 = base + 0x2043C;
2268
2269 /* For Message Unit */
2270
2271 regs->inb_aval_count_basel = base + 0x508;
2272 regs->inb_aval_count_baseh = base + 0x50C;
2273 regs->inb_write_pointer = base + 0x518;
2274 regs->inb_read_pointer = base + 0x51C;
2275 regs->outb_coal_cfg = base + 0x568;
2276 regs->outb_copy_basel = base + 0x5B0;
2277 regs->outb_copy_baseh = base + 0x5B4;
2278 regs->outb_copy_pointer = base + 0x544;
2279 regs->outb_read_pointer = base + 0x548;
2280 regs->outb_isr_cause = base + 0x560;
2282 /* Bit setting for HW */
2283 regs->int_comaout = 1 << 8;
2284 regs->int_comaerr = 1 << 6;
2285 regs->int_dl_cpu2pciea = 1 << 1;
2286 regs->cl_pointer_toggle = 1 << 12;
2287 regs->clic_irq = 1 << 1;
2288 regs->clic_in_err = 1 << 8;
2289 regs->clic_out_err = 1 << 12;
2290 regs->cl_slot_num_mask = 0xFFF;
2291 regs->int_drbl_int_mask = 0x3FFFFFFF;
2292 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2293 regs->int_comaerr;
2294 break;
2295 case PCI_DEVICE_ID_MARVELL_MV9580:
2296 mhba->mmio = mhba->base_addr[2];
2297 base = mhba->mmio;
2298 if (!mhba->regs) {
2299 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2300 if (mhba->regs == NULL)
2301 return -ENOMEM;
2302 }
2303 regs = mhba->regs;
2304 /* For Arm */
2305 regs->ctrl_sts_reg = base + 0x20104;
2306 regs->rstoutn_mask_reg = base + 0x1010C;
2307 regs->sys_soft_rst_reg = base + 0x10108;
2308 regs->main_int_cause_reg = base + 0x10200;
2309 regs->enpointa_mask_reg = base + 0x1020C;
2310 regs->rstoutn_en_reg = base + 0xF1400;
2311
2312 /* For Doorbell */
2313 regs->pciea_to_arm_drbl_reg = base + 0x10460;
2314 regs->arm_to_pciea_drbl_reg = base + 0x10480;
2315 regs->arm_to_pciea_mask_reg = base + 0x10484;
2316 regs->pciea_to_arm_msg0 = base + 0x10400;
2317 regs->pciea_to_arm_msg1 = base + 0x10404;
2318 regs->arm_to_pciea_msg0 = base + 0x10420;
2319 regs->arm_to_pciea_msg1 = base + 0x10424;
2320
2321 /* For reset*/
2322 regs->reset_request = base + 0x10108;
2323 regs->reset_enable = base + 0x1010c;
2324
2325 /* For Message Unit */
2326 regs->inb_aval_count_basel = base + 0x4008;
2327 regs->inb_aval_count_baseh = base + 0x400C;
2328 regs->inb_write_pointer = base + 0x4018;
2329 regs->inb_read_pointer = base + 0x401C;
2330 regs->outb_copy_basel = base + 0x4058;
2331 regs->outb_copy_baseh = base + 0x405C;
2332 regs->outb_copy_pointer = base + 0x406C;
2333 regs->outb_read_pointer = base + 0x4070;
2334 regs->outb_coal_cfg = base + 0x4080;
2335 regs->outb_isr_cause = base + 0x4088;
2336 /* Bit setting for HW */
2337 regs->int_comaout = 1 << 4;
2338 regs->int_dl_cpu2pciea = 1 << 12;
2339 regs->int_comaerr = 1 << 29;
2340 regs->cl_pointer_toggle = 1 << 14;
2341 regs->cl_slot_num_mask = 0x3FFF;
2342 regs->clic_irq = 1 << 0;
2343 regs->clic_out_err = 1 << 1;
2344 regs->int_drbl_int_mask = 0x3FFFFFFF;
2345 regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2346 break;
2347 default:
2348 return -1;
2350 }
2351
2352 return 0;
2353 }
2354
2355 /**
2356 * mvumi_init_fw - Initializes the FW
2357 * @mhba: Adapter soft state
2358 *
2359 * This is the main function for initializing firmware.
2360 */
2361 static int mvumi_init_fw(struct mvumi_hba *mhba)
2362 {
2363 int ret = 0;
2364
2365 if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2366 dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2367 return -EBUSY;
2368 }
2369 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2370 if (ret)
2371 goto fail_ioremap;
2372
2373 switch (mhba->pdev->device) {
2374 case PCI_DEVICE_ID_MARVELL_MV9143:
2375 mhba->instancet = &mvumi_instance_9143;
2376 mhba->io_seq = 0;
2377 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2378 mhba->request_id_enabled = 1;
2379 break;
2380 case PCI_DEVICE_ID_MARVELL_MV9580:
2381 mhba->instancet = &mvumi_instance_9580;
2382 mhba->io_seq = 0;
2383 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2384 break;
2385 default:
2386 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2387 mhba->pdev->device);
2388 mhba->instancet = NULL;
2389 ret = -EINVAL;
2390 goto fail_alloc_mem;
2391 }
2392 dev_dbg(&mhba->pdev->dev, "device id 0x%04X found.\n",
2393 mhba->pdev->device);
2394 ret = mvumi_cfg_hw_reg(mhba);
2395 if (ret) {
2396 dev_err(&mhba->pdev->dev,
2397 "failed to allocate memory for reg\n");
2398 ret = -ENOMEM;
2399 goto fail_alloc_mem;
2400 }
2401 mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
2402 &mhba->handshake_page_phys);
2403 if (!mhba->handshake_page) {
2404 dev_err(&mhba->pdev->dev,
2405 "failed to allocate memory for handshake\n");
2406 ret = -ENOMEM;
2407 goto fail_alloc_page;
2408 }
2409
2410 if (mvumi_start(mhba)) {
2411 ret = -EINVAL;
2412 goto fail_ready_state;
2413 }
2414 ret = mvumi_alloc_cmds(mhba);
2415 if (ret)
2416 goto fail_ready_state;
2417
2418 return 0;
2419
2420 fail_ready_state:
2421 mvumi_release_mem_resource(mhba);
2422 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
2423 mhba->handshake_page, mhba->handshake_page_phys);
2424 fail_alloc_page:
2425 kfree(mhba->regs);
2426 fail_alloc_mem:
2427 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2428 fail_ioremap:
2429 pci_release_regions(mhba->pdev);
2430
2431 return ret;
2432 }
2433
2434 /**
2435 * mvumi_io_attach - Attaches this driver to SCSI mid-layer
2436 * @mhba: Adapter soft state
2437 */
2438 static int mvumi_io_attach(struct mvumi_hba *mhba)
2439 {
2440 struct Scsi_Host *host = mhba->shost;
2441 struct scsi_device *sdev = NULL;
2442 int ret;
2443 unsigned int max_sg = (mhba->ib_max_size + 4 -
2444 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
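/*
 * max_sg mirrors the frame_len arithmetic in mvumi_send_command(): an
 * inbound slot must hold the message frame (minus its payload
 * placeholder) plus the SG list, so sg_tablesize is clamped to whichever
 * is smaller, this bound or the firmware-reported max_sge.
 */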
2445
2446 host->irq = mhba->pdev->irq;
2447 host->unique_id = mhba->unique_id;
2448 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2449 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2450 host->max_sectors = mhba->max_transfer_size / 512;
2451 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2452 host->max_id = mhba->max_target_id;
2453 host->max_cmd_len = MAX_COMMAND_SIZE;
2454 host->transportt = &mvumi_transport_template;
2455
2456 ret = scsi_add_host(host, &mhba->pdev->dev);
2457 if (ret) {
2458 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2459 return ret;
2460 }
2461 mhba->fw_flag |= MVUMI_FW_ATTACH;
2462
2463 mutex_lock(&mhba->sas_discovery_mutex);
2464 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2465 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2466 else
2467 ret = 0;
2468 if (ret) {
2469 dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2470 mutex_unlock(&mhba->sas_discovery_mutex);
2471 goto fail_add_device;
2472 }
2473
2474 mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2475 mhba, "mvumi_scanthread");
2476 if (IS_ERR(mhba->dm_thread)) {
2477 dev_err(&mhba->pdev->dev,
2478 "failed to create device scan thread\n");
2479 mutex_unlock(&mhba->sas_discovery_mutex);
2480 goto fail_create_thread;
2481 }
2482 atomic_set(&mhba->pnp_count, 1);
2483 wake_up_process(mhba->dm_thread);
2484
2485 mutex_unlock(&mhba->sas_discovery_mutex);
2486 return 0;
2487
2488 fail_create_thread:
2489 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2490 sdev = scsi_device_lookup(mhba->shost, 0,
2491 mhba->max_target_id - 1, 0);
2492 if (sdev) {
2493 scsi_remove_device(sdev);
2494 scsi_device_put(sdev);
2495 }
2496 fail_add_device:
2497 scsi_remove_host(mhba->shost);
2498 return ret;
2499 }
2500
2501 /**
2502 * mvumi_probe_one - PCI hotplug entry point
2503 * @pdev: PCI device structure
2504 * @id: PCI ids of supported hotplugged adapter
2505 */
2506 static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2507 {
2508 struct Scsi_Host *host;
2509 struct mvumi_hba *mhba;
2510 int ret;
2511
2512 dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2513 pdev->vendor, pdev->device, pdev->subsystem_vendor,
2514 pdev->subsystem_device);
2515
2516 ret = pci_enable_device(pdev);
2517 if (ret)
2518 return ret;
2519
2520 pci_set_master(pdev);
2521
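/* Prefer a 64-bit DMA mask where the build supports it, falling back
 * to 32-bit addressing if the platform refuses. */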
2522 if (IS_DMA64) {
2523 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2524 if (ret) {
2525 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2526 if (ret)
2527 goto fail_set_dma_mask;
2528 }
2529 } else {
2530 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2531 if (ret)
2532 goto fail_set_dma_mask;
2533 }
2534
2535 host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2536 if (!host) {
2537 dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2538 ret = -ENOMEM;
2539 goto fail_alloc_instance;
2540 }
2541 mhba = shost_priv(host);
2542
2543 INIT_LIST_HEAD(&mhba->cmd_pool);
2544 INIT_LIST_HEAD(&mhba->ob_data_list);
2545 INIT_LIST_HEAD(&mhba->free_ob_list);
2546 INIT_LIST_HEAD(&mhba->res_list);
2547 INIT_LIST_HEAD(&mhba->waiting_req_list);
2548 mutex_init(&mhba->device_lock);
2549 INIT_LIST_HEAD(&mhba->mhba_dev_list);
2550 INIT_LIST_HEAD(&mhba->shost_dev_list);
2551 atomic_set(&mhba->fw_outstanding, 0);
2552 init_waitqueue_head(&mhba->int_cmd_wait_q);
2553 mutex_init(&mhba->sas_discovery_mutex);
2554
2555 mhba->pdev = pdev;
2556 mhba->shost = host;
2557 mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2558
2559 ret = mvumi_init_fw(mhba);
2560 if (ret)
2561 goto fail_init_fw;
2562
2563 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2564 "mvumi", mhba);
2565 if (ret) {
2566 dev_err(&pdev->dev, "failed to register IRQ\n");
2567 goto fail_init_irq;
2568 }
2569
2570 mhba->instancet->enable_intr(mhba);
2571 pci_set_drvdata(pdev, mhba);
2572
2573 ret = mvumi_io_attach(mhba);
2574 if (ret)
2575 goto fail_io_attach;
2576
2577 mvumi_backup_bar_addr(mhba);
2578 dev_dbg(&pdev->dev, "mvumi driver probed successfully.\n");
2579
2580 return 0;
2581
2582 fail_io_attach:
2583 mhba->instancet->disable_intr(mhba);
2584 free_irq(mhba->pdev->irq, mhba);
2585 fail_init_irq:
2586 mvumi_release_fw(mhba);
2587 fail_init_fw:
2588 scsi_host_put(host);
2589
2590 fail_alloc_instance:
2591 fail_set_dma_mask:
2592 pci_disable_device(pdev);
2593
2594 return ret;
2595 }
2596
2597 static void mvumi_detach_one(struct pci_dev *pdev)
2598 {
2599 struct Scsi_Host *host;
2600 struct mvumi_hba *mhba;
2601
2602 mhba = pci_get_drvdata(pdev);
2603 if (mhba->dm_thread) {
2604 kthread_stop(mhba->dm_thread);
2605 mhba->dm_thread = NULL;
2606 }
2607
2608 mvumi_detach_devices(mhba);
2609 host = mhba->shost;
2610 scsi_remove_host(mhba->shost);
2611 mvumi_flush_cache(mhba);
2612
2613 mhba->instancet->disable_intr(mhba);
2614 free_irq(mhba->pdev->irq, mhba);
2615 mvumi_release_fw(mhba);
2616 scsi_host_put(host);
2617 pci_disable_device(pdev);
2618 dev_dbg(&pdev->dev, "driver is removed!\n");
2619 }
2620
2621 /**
2622 * mvumi_shutdown - Shutdown entry point
2623 * @pdev: PCI device structure
2624 */
2625 static void mvumi_shutdown(struct pci_dev *pdev)
2626 {
2627 struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2628
2629 mvumi_flush_cache(mhba);
2630 }
2631
2632 static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2633 {
2634 struct mvumi_hba *mhba = NULL;
2635
2636 mhba = pci_get_drvdata(pdev);
2637 mvumi_flush_cache(mhba);
2638
2639 pci_set_drvdata(pdev, mhba);
2640 mhba->instancet->disable_intr(mhba);
2641 free_irq(mhba->pdev->irq, mhba);
2642 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2643 pci_release_regions(pdev);
2644 pci_save_state(pdev);
2645 pci_disable_device(pdev);
2646 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2647
2648 return 0;
2649 }
2650
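/*
 * Resume repeats most of the probe-time bring-up: power state and DMA
 * masks are restored, the BARs are remapped, the register map is rebuilt,
 * the controller is reset and re-handshaken via mvumi_start(), and the
 * IRQ is re-registered before interrupts are enabled again. The
 * unconditional base_addr[0] assignment below looks suspect for the 9580,
 * which maps its registers from BAR2, though the regs pointers have
 * already been recomputed from the correct base by mvumi_cfg_hw_reg().
 */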
2651 static int mvumi_resume(struct pci_dev *pdev)
2652 {
2653 int ret;
2654 struct mvumi_hba *mhba = NULL;
2655
2656 mhba = pci_get_drvdata(pdev);
2657
2658 pci_set_power_state(pdev, PCI_D0);
2659 pci_enable_wake(pdev, PCI_D0, 0);
2660 pci_restore_state(pdev);
2661
2662 ret = pci_enable_device(pdev);
2663 if (ret) {
2664 dev_err(&pdev->dev, "enable device failed\n");
2665 return ret;
2666 }
2667 pci_set_master(pdev);
2668 if (IS_DMA64) {
2669 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2670 if (ret) {
2671 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2672 if (ret)
2673 goto fail;
2674 }
2675 } else {
2676 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2677 if (ret)
2678 goto fail;
2679 }
2680 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2681 if (ret)
2682 goto fail;
2683 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2684 if (ret)
2685 goto release_regions;
2686
2687 if (mvumi_cfg_hw_reg(mhba)) {
2688 ret = -EINVAL;
2689 goto unmap_pci_addr;
2690 }
2691
2692 mhba->mmio = mhba->base_addr[0];
2693 mvumi_reset(mhba);
2694
2695 if (mvumi_start(mhba)) {
2696 ret = -EINVAL;
2697 goto unmap_pci_addr;
2698 }
2699
2700 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2701 "mvumi", mhba);
2702 if (ret) {
2703 dev_err(&pdev->dev, "failed to register IRQ\n");
2704 goto unmap_pci_addr;
2705 }
2706 mhba->instancet->enable_intr(mhba);
2707
2708 return 0;
2709
2710 unmap_pci_addr:
2711 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2712 release_regions:
2713 pci_release_regions(pdev);
2714 fail:
2715 pci_disable_device(pdev);
2716
2717 return ret;
2718 }
2719
2720 static struct pci_driver mvumi_pci_driver = {
2722 .name = MV_DRIVER_NAME,
2723 .id_table = mvumi_pci_table,
2724 .probe = mvumi_probe_one,
2725 .remove = mvumi_detach_one,
2726 .shutdown = mvumi_shutdown,
2727 #ifdef CONFIG_PM
2728 .suspend = mvumi_suspend,
2729 .resume = mvumi_resume,
2730 #endif
2731 };
2732
2733 /**
2734 * mvumi_init - Driver load entry point
2735 */
2736 static int __init mvumi_init(void)
2737 {
2738 return pci_register_driver(&mvumi_pci_driver);
2739 }
2740
2741 /**
2742 * mvumi_exit - Driver unload entry point
2743 */
2744 static void __exit mvumi_exit(void)
2745 {
2747 pci_unregister_driver(&mvumi_pci_driver);
2748 }
2749
2750 module_init(mvumi_init);
2751 module_exit(mvumi_exit);