/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  comminit.c
 *
 * Abstract: This supports the initialization of the host adapter communication interface.
 *    This is a platform dependent module for the pci cyclone board.
 *
 */
31
32 #include <linux/kernel.h>
33 #include <linux/init.h>
34 #include <linux/types.h>
35 #include <linux/pci.h>
36 #include <linux/spinlock.h>
37 #include <linux/slab.h>
38 #include <linux/blkdev.h>
39 #include <linux/completion.h>
40 #include <linux/mm.h>
41 #include <scsi/scsi_host.h>
42 #include <asm/semaphore.h>
43
44 #include "aacraid.h"
45
struct aac_common aac_config = {
	.irq_mod = 1
};

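/**
 * aac_alloc_comm - allocate the adapter communication area
 * @dev: Adapter to allocate for
 * @commaddr: Returns the virtual address of the comm area
 * @commsize: Size of the comm area to allocate
 * @commalign: Required alignment of the comm area
 *
 * Allocates one DMA-coherent block holding the adapter fib area, the init
 * structure, the comm area and the printf buffer, and fills in the init
 * structure that is later handed to the adapter firmware.
 * Returns 1 on success, or 0 if the mapping could not be created.
 */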
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
	unsigned char *base;
	unsigned long size, align;
	const unsigned long fibsize = 4096;
	const unsigned long printfbufsiz = 256;
	struct aac_init *init;
	dma_addr_t phys;

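	/*
	 * The block is laid out, in order, as: the adapter fib area,
	 * the init structure, padding up to commalign, the comm area,
	 * then the printf buffer.
	 */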
	size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;

	base = pci_alloc_consistent(dev->pdev, size, &phys);

	if (base == NULL) {
		printk(KERN_ERR "aacraid: unable to create mapping.\n");
		return 0;
	}
	dev->comm_addr = (void *)base;
	dev->comm_phys = phys;
	dev->comm_size = size;

	dev->init = (struct aac_init *)(base + fibsize);
	dev->init_pa = phys + fibsize;

	init = dev->init;

	init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
	if (dev->max_fib_size != sizeof(struct hw_fib))
		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
	init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION);
	init->fsrev = cpu_to_le32(dev->fsrev);

	/*
	 * Adapter Fibs are the first thing allocated so that they
	 * start page aligned
	 */
	dev->aif_base_va = (struct hw_fib *)base;

	init->AdapterFibsVirtualAddress = 0;
	init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
	init->AdapterFibsSize = cpu_to_le32(fibsize);
	init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
	init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);

	init->InitFlags = 0;
	if (dev->comm_interface == AAC_COMM_MESSAGE) {
		init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
		dprintk((KERN_WARNING "aacraid: New Comm Interface enabled\n"));
	}
	init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);

	/*
	 * Increment the base address by the amount already used
	 */
	base = base + fibsize + sizeof(struct aac_init);
	phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
	/*
	 * Align the beginning of Headers to commalign
	 */
	align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
	base = base + align;
	phys = phys + align;
	/*
	 * Fill in addresses of the Comm Area Headers and Queues
	 */
	*commaddr = base;
	init->CommHeaderAddress = cpu_to_le32((u32)phys);
	/*
	 * Increment the base address by the size of the CommArea
	 */
	base = base + commsize;
	phys = phys + commsize;
	/*
	 * Place the Printf buffer area after the Fast I/O comm area.
	 */
	dev->printfbuf = (void *)base;
	init->printfbuf = cpu_to_le32(phys);
	init->printfbufsiz = cpu_to_le32(printfbufsiz);
	memset(base, 0, printfbufsiz);
	return 1;
}

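/**
 * aac_queue_init - initialise a single comm queue
 * @dev: Adapter the queue belongs to
 * @q: Queue to initialise
 * @mem: Producer/consumer index pair for this queue
 * @qsize: Number of entries the queue holds
 *
 * Sets up the wait queues, command list and spinlock for one comm queue
 * and points its producer and consumer indices at @mem.
 */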
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
	q->numpending = 0;
	q->dev = dev;
	init_waitqueue_head(&q->cmdready);
	INIT_LIST_HEAD(&q->cmdq);
	init_waitqueue_head(&q->qfull);
	spin_lock_init(&q->lockdata);
	q->lock = &q->lockdata;
	q->headers.producer = (__le32 *)mem;
	q->headers.consumer = (__le32 *)(mem+1);
	*(q->headers.producer) = cpu_to_le32(qsize);
	*(q->headers.consumer) = cpu_to_le32(qsize);
	q->entries = qsize;
}

/**
 * aac_send_shutdown - shutdown an adapter
 * @dev: Adapter to shutdown
 *
 * This routine will send a VM_CloseAll (shutdown) request to the adapter.
 */

int aac_send_shutdown(struct aac_dev * dev)
{
	struct fib * fibctx;
	struct aac_close *cmd;
	int status;

	fibctx = aac_fib_alloc(dev);
	if (!fibctx)
		return -ENOMEM;
	aac_fib_init(fibctx);

	cmd = (struct aac_close *) fib_data(fibctx);

	cmd->command = cpu_to_le32(VM_CloseAll);
	cmd->cid = cpu_to_le32(0xffffffff);

	status = aac_fib_send(ContainerCommand,
			      fibctx,
			      sizeof(struct aac_close),
			      FsaNormal,
			      -2 /* Timeout silently */, 1,
			      NULL, NULL);

	if (status >= 0)
		aac_fib_complete(fibctx);
	aac_fib_free(fibctx);
	return status;
}

/**
 * aac_comm_init - Initialise FSA data structures
 * @dev: Adapter to initialise
 *
 * Initializes the data structures that are required for the FSA communication
 * interface to operate.
 * Returns
 * 0 - if we were able to init the communication interface.
 * -ENOMEM - if the communication area could not be allocated. This is a fatal error.
 */

static int aac_comm_init(struct aac_dev * dev)
{
	unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
	unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
	u32 *headers;
	struct aac_entry * queues;
	unsigned long size;
	struct aac_queue_block * comm = dev->queues;
	/*
	 * Now allocate and initialize the zone structures used as our
	 * pool of FIB context records.  The size of the zone is based
	 * on the system memory size.  We also initialize the mutex used
	 * to protect the zone.
	 */
	spin_lock_init(&dev->fib_lock);

	/*
	 * Allocate the physically contiguous space for the communication
	 * queue headers.
	 */

	size = hdrsize + queuesize;

	if (!aac_alloc_comm(dev, (void **)&headers, size, QUEUE_ALIGNMENT))
		return -ENOMEM;

	queues = (struct aac_entry *)(((ulong)headers) + hdrsize);

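	/*
	 * Each queue consumes a producer/consumer index pair from the
	 * header area, so headers advances by two u32s per queue.
	 */
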
	/* Adapter to Host normal priority Command queue */
	comm->queue[HostNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
	queues += HOST_NORM_CMD_ENTRIES;
	headers += 2;

	/* Adapter to Host high priority command queue */
	comm->queue[HostHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
	queues += HOST_HIGH_CMD_ENTRIES;
	headers += 2;

	/* Host to adapter normal priority command queue */
	comm->queue[AdapNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
	queues += ADAP_NORM_CMD_ENTRIES;
	headers += 2;

	/* Host to adapter high priority command queue */
	comm->queue[AdapHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
	queues += ADAP_HIGH_CMD_ENTRIES;
	headers += 2;

	/* Adapter to host normal priority response queue */
	comm->queue[HostNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
	queues += HOST_NORM_RESP_ENTRIES;
	headers += 2;

	/* Adapter to host high priority response queue */
	comm->queue[HostHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
	queues += HOST_HIGH_RESP_ENTRIES;
	headers += 2;

	/* Host to adapter normal priority response queue */
	comm->queue[AdapNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
	queues += ADAP_NORM_RESP_ENTRIES;
	headers += 2;

	/* Host to adapter high priority response queue */
	comm->queue[AdapHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);

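	/*
	 * The adapter command queues share their locks with the host
	 * response queues of the same priority, and the adapter response
	 * queues with the matching host command queues.
	 */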
	comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
	comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
	comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
	comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;

	return 0;
}

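/**
 * aac_init_adapter - initialise an adapter
 * @dev: Adapter to initialise
 *
 * Negotiates the communication settings with the adapter, applies the
 * acbsize and numacb overrides, then allocates the comm queues and the
 * fib pool.
 * Returns the adapter on success, or NULL on a fatal error.
 */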
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
	u32 status[5];
	struct Scsi_Host * host = dev->scsi_host_ptr;

	/*
	 * Check the preferred comm settings, defaults from template.
	 */
	dev->max_fib_size = sizeof(struct hw_fib);
	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
		- sizeof(struct aac_fibhdr)
		- sizeof(struct aac_write) + sizeof(struct sgentry))
			/ sizeof(struct sgentry);
	dev->comm_interface = AAC_COMM_PRODUCER;
	dev->raw_io_64 = 0;
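	/*
	 * Ask the adapter which options it supports: 64-bit raw I/O and the
	 * message based comm interface.  If the new interface needs a larger
	 * register footprint than is currently mapped, remap the adapter.
	 */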
	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
		0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
		(status[0] == 0x00000001)) {
		if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
			dev->raw_io_64 = 1;
		if (dev->a_ops.adapter_comm &&
		    (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)))
			dev->comm_interface = AAC_COMM_MESSAGE;
		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
		    (status[2] > dev->base_size)) {
			aac_adapter_ioremap(dev, 0);
			dev->base_size = status[2];
			if (aac_adapter_ioremap(dev, status[2])) {
				/* remap failed, go back ... */
				dev->comm_interface = AAC_COMM_PRODUCER;
				if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
					printk(KERN_WARNING
					       "aacraid: unable to map adapter.\n");
					return NULL;
				}
			}
		}
	}
	if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
		0, 0, 0, 0, 0, 0,
		status+0, status+1, status+2, status+3, status+4)) &&
		(status[0] == 0x00000001)) {
		/*
		 * status[1] >> 16	maximum command size in KB
		 * status[1] & 0xFFFF	maximum FIB size
		 * status[2] >> 16	maximum SG elements to driver
		 * status[2] & 0xFFFF	maximum SG elements from driver
		 * status[3] & 0xFFFF	maximum number FIBs outstanding
		 */
		host->max_sectors = (status[1] >> 16) << 1;
		dev->max_fib_size = status[1] & 0xFFFF;
		host->sg_tablesize = status[2] >> 16;
		dev->sg_tablesize = status[2] & 0xFFFF;
		host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
		/*
		 * NOTE:
		 * All these overrides are based on a fixed internal
		 * knowledge and understanding of existing adapters,
		 * acbsize should be set with caution.
		 */
		if (acbsize == 512) {
			host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
			dev->max_fib_size = 512;
			dev->sg_tablesize = host->sg_tablesize
				= (512 - sizeof(struct aac_fibhdr)
				   - sizeof(struct aac_write) + sizeof(struct sgentry))
					/ sizeof(struct sgentry);
			host->can_queue = AAC_NUM_IO_FIB;
		} else if (acbsize == 2048) {
			host->max_sectors = 512;
			dev->max_fib_size = 2048;
			host->sg_tablesize = 65;
			dev->sg_tablesize = 81;
			host->can_queue = 512 - AAC_NUM_MGT_FIB;
		} else if (acbsize == 4096) {
			host->max_sectors = 1024;
			dev->max_fib_size = 4096;
			host->sg_tablesize = 129;
			dev->sg_tablesize = 166;
			host->can_queue = 256 - AAC_NUM_MGT_FIB;
		} else if (acbsize == 8192) {
			host->max_sectors = 2048;
			dev->max_fib_size = 8192;
			host->sg_tablesize = 257;
			dev->sg_tablesize = 337;
			host->can_queue = 128 - AAC_NUM_MGT_FIB;
		} else if (acbsize > 0) {
			printk("Illegal acbsize=%d ignored\n", acbsize);
		}
	}

	if (numacb > 0) {
		if (numacb < host->can_queue)
			host->can_queue = numacb;
		else
			printk("numacb=%d ignored\n", numacb);
	}

	/*
	 * Ok now init the communication subsystem
	 */

	dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
	if (dev->queues == NULL) {
		printk(KERN_ERR "Error could not allocate comm region.\n");
		return NULL;
	}

	if (aac_comm_init(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}
	/*
	 * Initialize the list of fibs
	 */
	if (aac_fib_setup(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}

	INIT_LIST_HEAD(&dev->fib_list);

	return dev;
}