/*
 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include "pm8001_sas.h"
#include "pm8001_chips.h"

static struct scsi_transport_template *pm8001_stt;

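/*
 * Per-chip information: number of phys and the hardware dispatch table.
 * The array is indexed by the chip id carried in driver_data of the PCI
 * id table at the bottom of this file.
 */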
static const struct pm8001_chip_info pm8001_chips[] = {
	[chip_8001] = {  8, &pm8001_8001_dispatch,},
};
static int pm8001_id;

LIST_HEAD(hba_list);

/**
 * The main structure which the LLDD must register with the SCSI core.
 */
static struct scsi_host_template pm8001_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= pm8001_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= pm8001_scan_finished,
	.scan_start		= pm8001_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= pm8001_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
	.shost_attrs		= pm8001_host_attrs,
};

/**
 * The SAS layer calls these functions to execute specific tasks.
 */
static struct sas_domain_function_template pm8001_transport_ops = {
	.lldd_dev_found		= pm8001_dev_found,
	.lldd_dev_gone		= pm8001_dev_gone,

	.lldd_execute_task	= pm8001_queue_command,
	.lldd_control_phy	= pm8001_phy_control,

	.lldd_abort_task	= pm8001_abort_task,
	.lldd_abort_task_set	= pm8001_abort_task_set,
	.lldd_clear_aca		= pm8001_clear_aca,
	.lldd_clear_task_set	= pm8001_clear_task_set,
	.lldd_I_T_nexus_reset	= pm8001_I_T_nexus_reset,
	.lldd_lu_reset		= pm8001_lu_reset,
	.lldd_query_task	= pm8001_query_task,
};

/**
 * pm8001_phy_init - initialize our adapter phys
 * @pm8001_ha: our hba structure.
 * @phy_id: phy id.
 */
static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
	int phy_id)
{
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	phy->phy_state = 0;
	phy->pm8001_ha = pm8001_ha;
	sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_id;
	sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
	sas_phy->lldd_phy = phy;
}

/**
 * pm8001_free - free hba
 * @pm8001_ha: our hba structure.
 *
 */
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	struct pm8001_wq *wq;

	if (!pm8001_ha)
		return;

	for (i = 0; i < USI_MAX_MEMCNT; i++) {
		if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
			pci_free_consistent(pm8001_ha->pdev,
				pm8001_ha->memoryMap.region[i].element_size,
				pm8001_ha->memoryMap.region[i].virt_ptr,
				pm8001_ha->memoryMap.region[i].phys_addr);
		}
	}
	PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
	if (pm8001_ha->shost)
		scsi_host_put(pm8001_ha->shost);
	list_for_each_entry(wq, &pm8001_ha->wq_list, entry)
		cancel_delayed_work(&wq->work_q);
	kfree(pm8001_ha->tags);
	kfree(pm8001_ha);
}

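/*
 * When PM8001_USE_TASKLET is defined, the hard interrupt handler below
 * only schedules this tasklet; the chip interrupt service routine then
 * runs in softirq (tasklet) context.
 */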
#ifdef PM8001_USE_TASKLET
static void pm8001_tasklet(unsigned long opaque)
{
	struct pm8001_hba_info *pm8001_ha;
	pm8001_ha = (struct pm8001_hba_info *)opaque;
	BUG_ON(!pm8001_ha);
	PM8001_CHIP_DISP->isr(pm8001_ha);
}
#endif

/**
 * pm8001_interrupt - when the HBA originates an interrupt, invoke this
 * dispatcher to handle each case.
 * @irq: irq number.
 * @opaque: the passed general host adapter struct
 */
static irqreturn_t pm8001_interrupt(int irq, void *opaque)
{
	struct pm8001_hba_info *pm8001_ha;
	irqreturn_t ret = IRQ_HANDLED;
	struct sas_ha_struct *sha = opaque;
	pm8001_ha = sha->lldd_ha;
	if (unlikely(!pm8001_ha))
		return IRQ_NONE;
	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
		return IRQ_NONE;
#ifdef PM8001_USE_TASKLET
	tasklet_schedule(&pm8001_ha->tasklet);
#else
	ret = PM8001_CHIP_DISP->isr(pm8001_ha);
#endif
	return ret;
}

/**
 * pm8001_alloc - initialize our hba structure and the DMA memory regions.
 * @pm8001_ha: our hba structure.
 *
 */
static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	spin_lock_init(&pm8001_ha->lock);
	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
		pm8001_phy_init(pm8001_ha, i);
		pm8001_ha->port[i].wide_port_phymap = 0;
		pm8001_ha->port[i].port_attached = 0;
		pm8001_ha->port[i].port_state = 0;
		INIT_LIST_HEAD(&pm8001_ha->port[i].list);
	}

	pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
	if (!pm8001_ha->tags)
		goto err_out;
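
	/*
	 * Lay out the DMA-coherent regions required by the controller
	 * firmware (MPI event logs, queue index arrays, inbound and
	 * outbound queues) and by the driver itself (NVMD response buffer,
	 * device table, CCB array); they are all allocated in one pass
	 * further down.
	 */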
	/* MPI Memory region 1 for AAP Event Log for fw */
	pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
	pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
	pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
	pm8001_ha->memoryMap.region[AAP1].alignment = 32;

	/* MPI Memory region 2 for IOP Event Log for fw */
	pm8001_ha->memoryMap.region[IOP].num_elements = 1;
	pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
	pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
	pm8001_ha->memoryMap.region[IOP].alignment = 32;

	/* MPI Memory region 3 for consumer Index of inbound queues */
	pm8001_ha->memoryMap.region[CI].num_elements = 1;
	pm8001_ha->memoryMap.region[CI].element_size = 4;
	pm8001_ha->memoryMap.region[CI].total_len = 4;
	pm8001_ha->memoryMap.region[CI].alignment = 4;

	/* MPI Memory region 4 for producer Index of outbound queues */
	pm8001_ha->memoryMap.region[PI].num_elements = 1;
	pm8001_ha->memoryMap.region[PI].element_size = 4;
	pm8001_ha->memoryMap.region[PI].total_len = 4;
	pm8001_ha->memoryMap.region[PI].alignment = 4;

	/* MPI Memory region 5 inbound queues */
	pm8001_ha->memoryMap.region[IB].num_elements = 256;
	pm8001_ha->memoryMap.region[IB].element_size = 64;
	pm8001_ha->memoryMap.region[IB].total_len = 256 * 64;
	pm8001_ha->memoryMap.region[IB].alignment = 64;

	/* MPI Memory region 6 outbound queues */
	pm8001_ha->memoryMap.region[OB].num_elements = 256;
	pm8001_ha->memoryMap.region[OB].element_size = 64;
	pm8001_ha->memoryMap.region[OB].total_len = 256 * 64;
	pm8001_ha->memoryMap.region[OB].alignment = 64;

	/* Memory region for NVMD write DMA */
	pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
	pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
	pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
	/* Memory region for devices */
	pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
	pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
		sizeof(struct pm8001_device);
	pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
		sizeof(struct pm8001_device);

	/* Memory region for ccb_info */
	pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
	pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
		sizeof(struct pm8001_ccb_info);
	pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
		sizeof(struct pm8001_ccb_info);

	for (i = 0; i < USI_MAX_MEMCNT; i++) {
		if (pm8001_mem_alloc(pm8001_ha->pdev,
			&pm8001_ha->memoryMap.region[i].virt_ptr,
			&pm8001_ha->memoryMap.region[i].phys_addr,
			&pm8001_ha->memoryMap.region[i].phys_addr_hi,
			&pm8001_ha->memoryMap.region[i].phys_addr_lo,
			pm8001_ha->memoryMap.region[i].total_len,
			pm8001_ha->memoryMap.region[i].alignment) != 0) {
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("Mem%d alloc failed\n",
					i));
				goto err_out;
		}
	}

	pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
	for (i = 0; i < PM8001_MAX_DEVICES; i++) {
		pm8001_ha->devices[i].dev_type = NO_DEVICE;
		pm8001_ha->devices[i].id = i;
		pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
		pm8001_ha->devices[i].running_req = 0;
	}
	pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
	for (i = 0; i < PM8001_MAX_CCB; i++) {
		pm8001_ha->ccb_info[i].ccb_dma_handle =
			pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
			i * sizeof(struct pm8001_ccb_info);
		pm8001_ha->ccb_info[i].task = NULL;
		pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
		pm8001_ha->ccb_info[i].device = NULL;
		++pm8001_ha->tags_num;
	}
	pm8001_ha->flags = PM8001F_INIT_TIME;
	/* Initialize tags */
	pm8001_tag_init(pm8001_ha);
	return 0;
err_out:
	return 1;
}

/**
 * pm8001_ioremap - remap the pci high physical address to kernel virtual
 * address so that we can access them.
 * @pm8001_ha: our hba structure.
 */
static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
{
	u32 bar;
	u32 logicalBar = 0;
	struct pci_dev *pdev;

	pdev = pm8001_ha->pdev;
	/* map pci mem (PMC pci base 0-3) */
	for (bar = 0; bar < 6; bar++) {
		/*
		** logical BARs for SPC:
		** bar 0 and 1 - logical BAR0
		** bar 2 and 3 - logical BAR1
		** bar4 - logical BAR2
		** bar5 - logical BAR3
		** Skip the appropriate assignments:
		*/
		if ((bar == 1) || (bar == 3))
			continue;
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			pm8001_ha->io_mem[logicalBar].membase =
				pci_resource_start(pdev, bar);
			pm8001_ha->io_mem[logicalBar].membase &=
				(u32)PCI_BASE_ADDRESS_MEM_MASK;
			pm8001_ha->io_mem[logicalBar].memsize =
				pci_resource_len(pdev, bar);
			pm8001_ha->io_mem[logicalBar].memvirtaddr =
				ioremap(pm8001_ha->io_mem[logicalBar].membase,
				pm8001_ha->io_mem[logicalBar].memsize);
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("PCI: bar %d, logicalBar %d "
				"virt_addr=%lx,len=%d\n", bar, logicalBar,
				(unsigned long)
				pm8001_ha->io_mem[logicalBar].memvirtaddr,
				pm8001_ha->io_mem[logicalBar].memsize));
		} else {
			pm8001_ha->io_mem[logicalBar].membase = 0;
			pm8001_ha->io_mem[logicalBar].memsize = 0;
			pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
		}
		logicalBar++;
	}
	return 0;
}

/**
 * pm8001_pci_alloc - initialize our ha card structure
 * @pdev: pci device.
 * @chip_id: chip id.
 * @shost: scsi host struct which has been initialized before.
 */
static struct pm8001_hba_info *__devinit
pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
{
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	pm8001_ha = sha->lldd_ha;
	if (!pm8001_ha)
		return NULL;

	pm8001_ha->pdev = pdev;
	pm8001_ha->dev = &pdev->dev;
	pm8001_ha->chip_id = chip_id;
	pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
	pm8001_ha->irq = pdev->irq;
	pm8001_ha->sas = sha;
	pm8001_ha->shost = shost;
	pm8001_ha->id = pm8001_id++;
	INIT_LIST_HEAD(&pm8001_ha->wq_list);
	pm8001_ha->logging_level = 0x01;
	sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
#ifdef PM8001_USE_TASKLET
	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
		(unsigned long)pm8001_ha);
#endif
	pm8001_ioremap(pm8001_ha);
	if (!pm8001_alloc(pm8001_ha))
		return pm8001_ha;
	pm8001_free(pm8001_ha);
	return NULL;
}

/**
 * pci_go_44 - pm8001-specific: its DMA can address at most 44 bits rather
 * than the full 64 bits.
 * @pdev: pci device.
 */
static int pci_go_44(struct pci_dev *pdev)
{
	int rc;

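	/*
	 * Try the 44-bit masks first; if the coherent mask cannot be set,
	 * fall back to 32-bit masks.
	 */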
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev,
				DMA_BIT_MASK(32));
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					"44-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return rc;
}

/**
 * pm8001_prep_sas_ha_init - allocate memory in the general hba struct and
 * initialize it.
 * @shost: scsi host which has been allocated outside.
 * @chip_info: our ha struct.
 */
static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
	const struct pm8001_chip_info *chip_info)
{
	int phy_nr, port_nr;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	phy_nr = chip_info->n_phy;
	port_nr = phy_nr;
	memset(sha, 0x00, sizeof(*sha));
	arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy)
		goto exit;
	arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_port)
		goto exit_free2;

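	/*
	 * Only the phy/port pointer arrays are allocated here; they are
	 * filled in by pm8001_post_sas_ha_init() once the per-phy and
	 * per-port structures exist.
	 */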
	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL);
	if (!sha->lldd_ha)
		goto exit_free1;

	shost->transportt = pm8001_stt;
	shost->max_id = PM8001_MAX_DEVICES;
	shost->max_lun = 8;
	shost->max_channel = 0;
	shost->unique_id = pm8001_id;
	shost->max_cmd_len = 16;
	shost->can_queue = PM8001_CAN_QUEUE;
	shost->cmd_per_lun = 32;
	return 0;
exit_free1:
	kfree(arr_port);
exit_free2:
	kfree(arr_phy);
exit:
	return -1;
}

/**
 * pm8001_post_sas_ha_init - initialize the general hba struct defined in libsas
 * @shost: scsi host which has been allocated outside
 * @chip_info: our ha struct.
 */
static void __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost,
	const struct pm8001_chip_info *chip_info)
{
	int i = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	pm8001_ha = sha->lldd_ha;
	for (i = 0; i < chip_info->n_phy; i++) {
		sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
		sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
	}
	sha->sas_ha_name = DRV_NAME;
	sha->dev = pm8001_ha->dev;

	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &pm8001_ha->sas_addr[0];
	sha->num_phys = chip_info->n_phy;
	sha->lldd_max_execute_num = 1;
	sha->lldd_queue_size = PM8001_CAN_QUEUE;
	sha->core.shost = shost;
}

/**
 * pm8001_init_sas_add - initialize sas address
 * @pm8001_ha: our ha struct.
 *
 * Currently we just set a fixed SAS address for our HBA; in production it
 * should be read from the EEPROM.
 */
static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
	u8 i;
#ifdef PM8001_READ_VPD
	DECLARE_COMPLETION_ONSTACK(completion);
	struct pm8001_ioctl_payload payload;
	pm8001_ha->nvmd_completion = &completion;
	payload.minor_function = 0;
	payload.length = 128;
	payload.func_specific = kzalloc(128, GFP_KERNEL);
	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
	wait_for_completion(&completion);
	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
		memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
			SAS_ADDR_SIZE);
		PM8001_INIT_DBG(pm8001_ha,
			pm8001_printk("phy %d sas_addr = %016llx\n", i,
			pm8001_ha->phy[i].dev_sas_addr));
	}
#else
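	/*
	 * Without VPD support every phy gets the same fixed address; the
	 * value is converted to the big-endian (wire) byte order expected
	 * by libsas.
	 */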
	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
		pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
		pm8001_ha->phy[i].dev_sas_addr =
			cpu_to_be64((u64)
				(*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
	}
	memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
		SAS_ADDR_SIZE);
#endif
}

#ifdef PM8001_USE_MSIX
/**
 * pm8001_setup_msix - enable MSI-X interrupt
 * @pm8001_ha: our ha struct.
 * @irq_handler: irq_handler
 */
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
	irq_handler_t irq_handler)
{
	u32 i = 0, j = 0;
	u32 number_of_intr = 1;
	int flag = 0;
	u32 max_entry;
	int rc;
	max_entry = sizeof(pm8001_ha->msix_entries) /
		sizeof(pm8001_ha->msix_entries[0]);
	flag |= IRQF_DISABLED;
	for (i = 0; i < max_entry; i++)
		pm8001_ha->msix_entries[i].entry = i;
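	/*
	 * Only a single vector (number_of_intr) is requested below, even
	 * though every entry in the MSI-X table is initialized above.
	 */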
	rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
		number_of_intr);
	pm8001_ha->number_of_intr = number_of_intr;
	if (!rc) {
		for (i = 0; i < number_of_intr; i++) {
			if (request_irq(pm8001_ha->msix_entries[i].vector,
				irq_handler, flag, DRV_NAME,
				SHOST_TO_SAS_HA(pm8001_ha->shost))) {
				for (j = 0; j < i; j++)
					free_irq(
					pm8001_ha->msix_entries[j].vector,
					SHOST_TO_SAS_HA(pm8001_ha->shost));
				pci_disable_msix(pm8001_ha->pdev);
				break;
			}
		}
	}
	return rc;
}
#endif

/**
 * pm8001_request_irq - register interrupt
 * @pm8001_ha: our ha struct.
 */
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
{
	struct pci_dev *pdev;
	irq_handler_t irq_handler = pm8001_interrupt;
	int rc;

	pdev = pm8001_ha->pdev;

#ifdef PM8001_USE_MSIX
	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
		return pm8001_setup_msix(pm8001_ha, irq_handler);
	else
		goto intx;
#endif

intx:
	/* initialize the INT-X interrupt */
	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
		SHOST_TO_SAS_HA(pm8001_ha->shost));
	return rc;
}

/**
 * pm8001_pci_probe - probe a supported device
 * @pdev: pci device which the kernel has been prepared for.
 * @ent: pci device id
 *
 * This function is the main initialization function; it is invoked when a
 * new PCI device is bound to the driver. All structure and hardware
 * initialization should be done here, and the interrupt is registered
 * as well.
 */
static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int rc;
	u32 pci_reg;
	struct pm8001_hba_info *pm8001_ha;
	struct Scsi_Host *shost = NULL;
	const struct pm8001_chip_info *chip;

	dev_printk(KERN_INFO, &pdev->dev,
		"pm8001: driver version %s\n", DRV_VERSION);
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_enable;
	pci_set_master(pdev);
	/*
	 * Enable pci slot busmaster by setting pci command register.
	 * This is required by FW for Cyclone card.
	 */

	pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg);
	pci_reg |= 0x157;
	pci_write_config_dword(pdev, PCI_COMMAND, pci_reg);
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;
	rc = pci_go_44(pdev);
	if (rc)
		goto err_out_regions;

	shost = scsi_host_alloc(&pm8001_sht, sizeof(void *));
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	chip = &pm8001_chips[ent->driver_data];
	SHOST_TO_SAS_HA(shost) =
		kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
	if (!SHOST_TO_SAS_HA(shost)) {
		rc = -ENOMEM;
		goto err_out_free_host;
	}

	rc = pm8001_prep_sas_ha_init(shost, chip);
	if (rc) {
		rc = -ENOMEM;
		goto err_out_free;
	}
	pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
	pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
	if (!pm8001_ha) {
		rc = -ENOMEM;
		goto err_out_free;
	}
	list_add_tail(&pm8001_ha->list, &hba_list);
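	/*
	 * Soft-reset the controller and run the MPI initialization before
	 * the SCSI host and the interrupt handler are registered below.
	 */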
	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
	rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
	if (rc)
		goto err_out_ha_free;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha_free;
	rc = pm8001_request_irq(pm8001_ha);
	if (rc)
		goto err_out_shost;

	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
	pm8001_init_sas_add(pm8001_ha);
	pm8001_post_sas_ha_init(shost, chip);
	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
	if (rc)
		goto err_out_shost;
	scsi_scan_host(pm8001_ha->shost);
	return 0;

err_out_shost:
	scsi_remove_host(pm8001_ha->shost);
err_out_ha_free:
	pm8001_free(pm8001_ha);
err_out_free:
	kfree(SHOST_TO_SAS_HA(shost));
err_out_free_host:
	kfree(shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_enable:
	return rc;
}

static void __devexit pm8001_pci_remove(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct pm8001_hba_info *pm8001_ha;
	int i;
	pm8001_ha = sha->lldd_ha;
	pci_set_drvdata(pdev, NULL);
	sas_unregister_ha(sha);
	sas_remove_host(pm8001_ha->shost);
	list_del(&pm8001_ha->list);
	scsi_remove_host(pm8001_ha->shost);
	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);

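	/*
	 * Quiesce and release the interrupt resources before the HBA
	 * structures are freed below.
	 */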
#ifdef PM8001_USE_MSIX
	for (i = 0; i < pm8001_ha->number_of_intr; i++)
		synchronize_irq(pm8001_ha->msix_entries[i].vector);
	for (i = 0; i < pm8001_ha->number_of_intr; i++)
		free_irq(pm8001_ha->msix_entries[i].vector, sha);
	pci_disable_msix(pdev);
#else
	free_irq(pm8001_ha->irq, sha);
#endif
#ifdef PM8001_USE_TASKLET
	tasklet_kill(&pm8001_ha->tasklet);
#endif
	pm8001_free(pm8001_ha);
	kfree(sha->sas_phy);
	kfree(sha->sas_port);
	kfree(sha);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/**
 * pm8001_pci_suspend - power management suspend main entry point
 * @pdev: PCI device struct
 * @state: PM state change to (usually PCI_D3)
 *
 * Returns 0 on success, anything else on error.
 */
static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct pm8001_hba_info *pm8001_ha;
	int i, pos;
	u32 device_state;
	pm8001_ha = sha->lldd_ha;
	flush_scheduled_work();
	scsi_block_requests(pm8001_ha->shost);
	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pos == 0) {
		printk(KERN_ERR " PCI PM not supported\n");
		return -ENODEV;
	}
	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
#ifdef PM8001_USE_MSIX
	for (i = 0; i < pm8001_ha->number_of_intr; i++)
		synchronize_irq(pm8001_ha->msix_entries[i].vector);
	for (i = 0; i < pm8001_ha->number_of_intr; i++)
		free_irq(pm8001_ha->msix_entries[i].vector, sha);
	pci_disable_msix(pdev);
#else
	free_irq(pm8001_ha->irq, sha);
#endif
#ifdef PM8001_USE_TASKLET
	tasklet_kill(&pm8001_ha->tasklet);
#endif
	device_state = pci_choose_state(pdev, state);
	pm8001_printk("pdev=0x%p, slot=%s, entering "
		      "operating state [D%d]\n", pdev,
		      pm8001_ha->name, device_state);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, device_state);
	return 0;
}

/**
 * pm8001_pci_resume - power management resume main entry point
 * @pdev: PCI device struct
 *
 * Returns 0 on success, anything else on error.
 */
static int pm8001_pci_resume(struct pci_dev *pdev)
{
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct pm8001_hba_info *pm8001_ha;
	int rc;
	u32 device_state;
	pm8001_ha = sha->lldd_ha;
	device_state = pdev->current_state;

	pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
		"operating state [D%d]\n", pdev, pm8001_ha->name, device_state);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		pm8001_printk("slot=%s Enable device failed during resume\n",
			      pm8001_ha->name);
		goto err_out_enable;
	}

	pci_set_master(pdev);
	rc = pci_go_44(pdev);
	if (rc)
		goto err_out_disable;

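	/*
	 * Bring the controller back up: soft reset, MPI re-initialization,
	 * then re-register the interrupt that was released in suspend.
	 */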
	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
	rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
	if (rc)
		goto err_out_disable;
	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
	rc = pm8001_request_irq(pm8001_ha);
	if (rc)
		goto err_out_disable;
#ifdef PM8001_USE_TASKLET
	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
		(unsigned long)pm8001_ha);
#endif
	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
	scsi_unblock_requests(pm8001_ha->shost);
	return 0;

err_out_disable:
	scsi_remove_host(pm8001_ha->shost);
	pci_disable_device(pdev);
err_out_enable:
	return rc;
}

static struct pci_device_id __devinitdata pm8001_pci_table[] = {
	{
		PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
	},
	{
		PCI_DEVICE(0x117c, 0x0042),
		.driver_data = chip_8001
	},
	{} /* terminate list */
};

static struct pci_driver pm8001_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= pm8001_pci_table,
	.probe		= pm8001_pci_probe,
	.remove		= __devexit_p(pm8001_pci_remove),
	.suspend	= pm8001_pci_suspend,
	.resume		= pm8001_pci_resume,
};

/**
 * pm8001_init - initialize scsi transport template
 */
static int __init pm8001_init(void)
{
	int rc;
	pm8001_id = 0;
	pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
	if (!pm8001_stt)
		return -ENOMEM;
	rc = pci_register_driver(&pm8001_pci_driver);
	if (rc)
		goto err_out;
	return 0;
err_out:
	sas_release_transport(pm8001_stt);
	return rc;
}

static void __exit pm8001_exit(void)
{
	pci_unregister_driver(&pm8001_pci_driver);
	sas_release_transport(pm8001_stt);
}

module_init(pm8001_init);
module_exit(pm8001_exit);

MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pm8001_pci_table);