/*
* Filename: dma.c
*
*
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
*	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
*
* (C) Copyright 2013 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#include "rsxx_priv.h"

struct rsxx_dma {
	struct list_head list;
	u8 cmd;
	unsigned int laddr;	/* Logical address on the ramsan */
	struct {
		u32 off;
		u32 cnt;
	} sub_page;
	dma_addr_t dma_addr;
	struct page *page;
	unsigned int pg_off;	/* Page Offset */
	rsxx_dma_cb cb;
	void *cb_data;
};

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT	msecs_to_jiffies(10000)

struct hw_status {
	u8	status;
	u8	tag;
	__le16	count;
	__le32	_rsvd2;
	__le64	_rsvd3;
} __packed;

enum rsxx_dma_status {
	DMA_SW_ERR	= 0x1,
	DMA_HW_FAULT	= 0x2,
	DMA_CANCELLED	= 0x4,
};

struct hw_cmd {
	u8	command;
	u8	tag;
	u8	_rsvd;
	u8	sub_page; /* Bit[0:2]: 512byte offset */
			  /* Bit[4:6]: 512byte count */
	__le32	device_addr;
	__le64	host_addr;
} __packed;

enum rsxx_hw_cmd {
	HW_CMD_BLK_DISCARD	= 0x70,
	HW_CMD_BLK_WRITE	= 0x80,
	HW_CMD_BLK_READ		= 0xC0,
	HW_CMD_BLK_RECON_READ	= 0xE0,
};

enum rsxx_hw_status {
	HW_STATUS_CRC		= 0x01,
	HW_STATUS_HARD_ERR	= 0x02,
	HW_STATUS_SOFT_ERR	= 0x04,
	HW_STATUS_FAULT		= 0x08,
};

#define STATUS_BUFFER_SIZE8	4096
#define COMMAND_BUFFER_SIZE8	4096

static struct kmem_cache *rsxx_dma_pool;

struct dma_tracker {
	int next_tag;
	struct rsxx_dma	*dma;
};

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
		(sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))

struct dma_tracker_list {
	spinlock_t lock;
	int head;
	struct dma_tracker list[0];
};


/*----------------- Misc Utility Functions -------------------*/
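/*
 * Convert a raw byte address (the "8" suffix used throughout this file
 * appears to denote byte, i.e. 8-bit, units) into the logical block address
 * seen by a single DMA target: the stripe-selecting upper bits are shifted
 * down past the target-index bits configured in rsxx_dma_stripe_setup(),
 * recombined with the offset inside the stripe, and divided by the hardware
 * block size.
 */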
unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
	unsigned long long tgt_addr8;

	tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
		      card->_stripe.upper_mask) |
		    ((addr8) & card->_stripe.lower_mask);
	do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
	return tgt_addr8;
}

unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
	unsigned int tgt;

	tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

	return tgt;
}

static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
	/* Reset all DMA Command/Status Queues */
	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
}

static unsigned int get_dma_size(struct rsxx_dma *dma)
{
	if (dma->sub_page.cnt)
		return dma->sub_page.cnt << 9;
	else
		return RSXX_HW_BLK_SIZE;
}


/*----------------- DMA Tracker -------------------*/
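/*
 * The tracker list pairs each outstanding hardware tag with the rsxx_dma it
 * describes. Free tags form a singly linked list threaded through next_tag,
 * with 'head' pointing at the first free entry and -1 marking the end;
 * pop_tracker() hands out a tag when a command is issued and push_tracker()
 * returns it once its completion has been processed.
 */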
static void set_tracker_dma(struct dma_tracker_list *trackers,
			    int tag,
			    struct rsxx_dma *dma)
{
	trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
					int tag)
{
	return trackers->list[tag].dma;
}

static int pop_tracker(struct dma_tracker_list *trackers)
{
	int tag;

	spin_lock(&trackers->lock);
	tag = trackers->head;
	if (tag != -1) {
		trackers->head = trackers->list[tag].next_tag;
		trackers->list[tag].next_tag = -1;
	}
	spin_unlock(&trackers->lock);

	return tag;
}

static void push_tracker(struct dma_tracker_list *trackers, int tag)
{
	spin_lock(&trackers->lock);
	trackers->list[tag].next_tag = trackers->head;
	trackers->head = tag;
	trackers->list[tag].dma = NULL;
	spin_unlock(&trackers->lock);
}


/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 *	Interrupt Timer (64ns units) [15:0]
 *	Interrupt Count [24:16]
 *	Reserved [31:25]
*/
#define INTR_COAL_LATENCY_MASK (0x0000ffff)

#define INTR_COAL_COUNT_SHIFT 16
#define INTR_COAL_COUNT_BITS 9
#define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \
				INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS 64

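/*
 * Pack an interrupt-coalescing count and latency into the register layout
 * described above. As a purely illustrative example, a count of 8 and a
 * latency of 4096ns would encode as (8 << 16) | (4096 / 64) = 0x80040.
 */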
static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
{
	u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

	if (mode == RSXX_INTR_COAL_DISABLED)
		return 0;

	return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
			(latency_units & INTR_COAL_LATENCY_MASK);

}

static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
{
	int i;
	u32 q_depth = 0;
	u32 intr_coal;

	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
		return;

	for (i = 0; i < card->n_targets; i++)
		q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      q_depth / 2,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);
}

/*----------------- RSXX DMA Handling -------------------*/
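/*
 * Final accounting for a single DMA: report any error bits, unmap the data
 * page (writes were mapped to-device, everything else from-device), invoke
 * the caller's completion callback with a pass/fail indication, and return
 * the descriptor to the slab pool.
 */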
static void rsxx_complete_dma(struct rsxx_cardinfo *card,
			      struct rsxx_dma *dma,
			      unsigned int status)
{
	if (status & DMA_SW_ERR)
		printk_ratelimited(KERN_ERR
				   "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
				   dma->cmd, dma->laddr);
	if (status & DMA_HW_FAULT)
		printk_ratelimited(KERN_ERR
				   "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
				   dma->cmd, dma->laddr);
	if (status & DMA_CANCELLED)
		printk_ratelimited(KERN_ERR
				   "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
				   dma->cmd, dma->laddr);

	if (dma->dma_addr)
		pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
			       dma->cmd == HW_CMD_BLK_WRITE ?
			       PCI_DMA_TODEVICE :
			       PCI_DMA_FROMDEVICE);

	if (dma->cb)
		dma->cb(card, dma->cb_data, status ? 1 : 0);

	kmem_cache_free(rsxx_dma_pool, dma);
}

static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
			     struct rsxx_dma *dma)
{
	/*
	 * Requeued DMAs go to the front of the queue so they are issued
	 * first.
	 */
	spin_lock(&ctrl->queue_lock);
	list_add(&dma->list, &ctrl->queue);
	spin_unlock(&ctrl->queue_lock);
}

static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
				  struct rsxx_dma *dma,
				  u8 hw_st)
{
	unsigned int status = 0;
	int requeue_cmd = 0;

	dev_dbg(CARD_TO_DEV(ctrl->card),
		"Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
		dma->cmd, dma->laddr, hw_st);

	if (hw_st & HW_STATUS_CRC)
		ctrl->stats.crc_errors++;
	if (hw_st & HW_STATUS_HARD_ERR)
		ctrl->stats.hard_errors++;
	if (hw_st & HW_STATUS_SOFT_ERR)
		ctrl->stats.soft_errors++;

	switch (dma->cmd) {
	case HW_CMD_BLK_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			if (ctrl->card->scrub_hard) {
				dma->cmd = HW_CMD_BLK_RECON_READ;
				requeue_cmd = 1;
				ctrl->stats.reads_retried++;
			} else {
				status |= DMA_HW_FAULT;
				ctrl->stats.reads_failed++;
			}
		} else if (hw_st & HW_STATUS_FAULT) {
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}

		break;
	case HW_CMD_BLK_RECON_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			/* Data could not be reconstructed. */
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}

		break;
	case HW_CMD_BLK_WRITE:
		status |= DMA_HW_FAULT;
		ctrl->stats.writes_failed++;

		break;
	case HW_CMD_BLK_DISCARD:
		status |= DMA_HW_FAULT;
		ctrl->stats.discards_failed++;

		break;
	default:
		dev_err(CARD_TO_DEV(ctrl->card),
			"Unknown command in DMA!(cmd: x%02x "
			"laddr x%08x st: x%02x)\n",
			dma->cmd, dma->laddr, hw_st);
		status |= DMA_SW_ERR;

		break;
	}

	if (requeue_cmd)
		rsxx_requeue_dma(ctrl, dma);
	else
		rsxx_complete_dma(ctrl->card, dma, status);
}

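/*
 * Activity-timer callback, armed whenever commands are outstanding. If the
 * hardware's command index does not match what the driver last wrote, the
 * doorbell write is assumed to have been lost and is simply reissued;
 * otherwise the channel is considered wedged and the interface is marked
 * faulted so any queued DMAs get cancelled on the issue path.
 */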
static void dma_engine_stalled(unsigned long data)
{
	struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
		return;

	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
		/*
		 * The dma engine was stalled because the SW_CMD_IDX write
		 * was lost. Issue it again to recover.
		 */
		dev_warn(CARD_TO_DEV(ctrl->card),
			 "SW_CMD_IDX write was lost, re-writing...\n");
		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);
	} else {
		dev_warn(CARD_TO_DEV(ctrl->card),
			 "DMA channel %d has stalled, faulting interface.\n",
			 ctrl->id);
		ctrl->card->dma_fault = 1;
	}
}

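/*
 * Issue-side work function. DMAs are popped off the software queue, bound
 * to a free tracker tag, and written into the command ring until either the
 * queue or the tag pool runs dry. A single write of the new command index
 * (after a write barrier) then hands the whole batch to the hardware.
 */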
static void rsxx_issue_dmas(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;
	struct rsxx_dma *dma;
	int tag;
	int cmds_pending = 0;
	struct hw_cmd *hw_cmd_buf;

	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
	hw_cmd_buf = ctrl->cmd.buf;

	if (unlikely(ctrl->card->halt))
		return;

	while (1) {
		spin_lock(&ctrl->queue_lock);
		if (list_empty(&ctrl->queue)) {
			spin_unlock(&ctrl->queue_lock);
			break;
		}
		spin_unlock(&ctrl->queue_lock);

		tag = pop_tracker(ctrl->trackers);
		if (tag == -1)
			break;

		spin_lock(&ctrl->queue_lock);
		dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
		list_del(&dma->list);
		ctrl->stats.sw_q_depth--;
		spin_unlock(&ctrl->queue_lock);

		/*
		 * This will catch any DMAs that slipped in right before the
		 * fault, but were queued after all the other DMAs were
		 * cancelled.
		 */
		if (unlikely(ctrl->card->dma_fault)) {
			push_tracker(ctrl->trackers, tag);
			rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
			continue;
		}

		set_tracker_dma(ctrl->trackers, tag, dma);
		hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
		hw_cmd_buf[ctrl->cmd.idx].tag = tag;
		hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
		hw_cmd_buf[ctrl->cmd.idx].sub_page =
					((dma->sub_page.cnt & 0x7) << 4) |
					 (dma->sub_page.off & 0x7);

		hw_cmd_buf[ctrl->cmd.idx].device_addr =
					cpu_to_le32(dma->laddr);

		hw_cmd_buf[ctrl->cmd.idx].host_addr =
					cpu_to_le64(dma->dma_addr);

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Issue DMA%d(laddr %d tag %d) to idx %d\n",
			ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

		ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
		cmds_pending++;

		if (dma->cmd == HW_CMD_BLK_WRITE)
			ctrl->stats.writes_issued++;
		else if (dma->cmd == HW_CMD_BLK_DISCARD)
			ctrl->stats.discards_issued++;
		else
			ctrl->stats.reads_issued++;
	}

	/* Let HW know we've queued commands. */
	if (cmds_pending) {
		/*
		 * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
		 * (which is in PCI-consistent system-memory) from the loop
		 * above make it into the coherency domain before the
		 * following PIO "trigger" updating the cmd.idx. A WMB is
		 * sufficient. We need not explicitly CPU cache-flush since
		 * the memory is a PCI-consistent (i.e., coherent) mapping.
		 */
		wmb();

		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);
		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
	}
}

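/*
 * Completion-side work function. Status entries are consumed from the ring
 * for as long as the hardware-written count matches the driver's expected
 * event count (e_cnt); each entry is routed to either the error handler or
 * rsxx_complete_dma() and its tag recycled. Interrupts for this channel are
 * re-enabled on the way out, and the issue worker is kicked if more DMAs
 * are waiting in the software queue.
 */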
static void rsxx_dma_done(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;
	struct rsxx_dma *dma;
	unsigned long flags;
	u16 count;
	u8 status;
	u8 tag;
	struct hw_status *hw_st_buf;

	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
	hw_st_buf = ctrl->status.buf;

	if (unlikely(ctrl->card->halt) ||
	    unlikely(ctrl->card->dma_fault))
		return;

	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

	while (count == ctrl->e_cnt) {
		/*
		 * The read memory-barrier is necessary to keep aggressive
		 * processors/optimizers (such as the PPC Apple G5) from
		 * reordering the following status-buffer tag & status read
		 * *before* the count read on subsequent iterations of the
		 * loop!
		 */
		rmb();

		status = hw_st_buf[ctrl->status.idx].status;
		tag = hw_st_buf[ctrl->status.idx].tag;

		dma = get_tracker_dma(ctrl->trackers, tag);
		if (dma == NULL) {
			spin_lock_irqsave(&ctrl->card->irq_lock, flags);
			rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
			spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

			dev_err(CARD_TO_DEV(ctrl->card),
				"No tracker for tag %d "
				"(idx %d id %d)\n",
				tag, ctrl->status.idx, ctrl->id);
			return;
		}

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Completing DMA%d"
			"(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
			ctrl->id, dma->laddr, tag, status, count,
			ctrl->status.idx);

		atomic_dec(&ctrl->stats.hw_q_depth);

		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		if (status)
			rsxx_handle_dma_error(ctrl, dma, status);
		else
			rsxx_complete_dma(ctrl->card, dma, 0);

		push_tracker(ctrl->trackers, tag);

		ctrl->status.idx = (ctrl->status.idx + 1) &
				   RSXX_CS_IDX_MASK;
		ctrl->e_cnt++;

		count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
	}

	dma_intr_coal_auto_tune(ctrl->card);

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
		del_timer_sync(&ctrl->activity_timer);

	spin_lock_irqsave(&ctrl->card->irq_lock, flags);
	rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
	spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

	spin_lock(&ctrl->queue_lock);
	if (ctrl->stats.sw_q_depth)
		queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
	spin_unlock(&ctrl->queue_lock);
}

static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
				  struct list_head *q)
{
	struct rsxx_dma *dma;
	struct rsxx_dma *tmp;
	int cnt = 0;

	list_for_each_entry_safe(dma, tmp, q, list) {
		list_del(&dma->list);

		if (dma->dma_addr)
			pci_unmap_page(card->dev, dma->dma_addr,
				       get_dma_size(dma),
				       (dma->cmd == HW_CMD_BLK_WRITE) ?
				       PCI_DMA_TODEVICE :
				       PCI_DMA_FROMDEVICE);
		kmem_cache_free(rsxx_dma_pool, dma);
		cnt++;
	}

	return cnt;
}

static int rsxx_queue_discard(struct rsxx_cardinfo *card,
			      struct list_head *q,
			      unsigned int laddr,
			      rsxx_dma_cb cb,
			      void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->cmd = HW_CMD_BLK_DISCARD;
	dma->laddr = laddr;
	dma->dma_addr = 0;
	dma->sub_page.off = 0;
	dma->sub_page.cnt = 0;
	dma->page = NULL;
	dma->pg_off = 0;
	dma->cb = cb;
	dma->cb_data = cb_data;

	dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

	list_add_tail(&dma->list, q);

	return 0;
}

static int rsxx_queue_dma(struct rsxx_cardinfo *card,
			  struct list_head *q,
			  int dir,
			  unsigned int dma_off,
			  unsigned int dma_len,
			  unsigned int laddr,
			  struct page *page,
			  unsigned int pg_off,
			  rsxx_dma_cb cb,
			  void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
				     dir ? PCI_DMA_TODEVICE :
				     PCI_DMA_FROMDEVICE);
	if (!dma->dma_addr) {
		kmem_cache_free(rsxx_dma_pool, dma);
		return -ENOMEM;
	}

	dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
	dma->laddr = laddr;
	dma->sub_page.off = (dma_off >> 9);
	dma->sub_page.cnt = (dma_len >> 9);
	dma->page = page;
	dma->pg_off = pg_off;
	dma->cb = cb;
	dma->cb_data = cb_data;

	dev_dbg(CARD_TO_DEV(card),
		"Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
		dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
		dma->sub_page.cnt, dma->page, dma->pg_off);

	/* Queue the DMA */
	list_add_tail(&dma->list, q);

	return 0;
}

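/*
 * Split a bio into per-target DMA descriptors. Discards are carved into
 * RSXX_HW_BLK_SIZE chunks; reads and writes walk each bio segment and are
 * clipped so no descriptor crosses a hardware block boundary. Descriptors
 * are batched per channel and only spliced onto the live queues (and the
 * issue workers kicked) once the whole bio has been translated, so a
 * mid-bio allocation failure can unwind without touching the hardware
 * queues.
 */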
int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
		       struct bio *bio,
		       atomic_t *n_dmas,
		       rsxx_dma_cb cb,
		       void *cb_data)
{
	struct list_head dma_list[RSXX_MAX_TARGETS];
	struct bio_vec *bvec;
	unsigned long long addr8;
	unsigned int laddr;
	unsigned int bv_len;
	unsigned int bv_off;
	unsigned int dma_off;
	unsigned int dma_len;
	int dma_cnt[RSXX_MAX_TARGETS];
	int tgt;
	int st;
	int i;

	addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
	atomic_set(n_dmas, 0);

	for (i = 0; i < card->n_targets; i++) {
		INIT_LIST_HEAD(&dma_list[i]);
		dma_cnt[i] = 0;
	}

	if (bio->bi_rw & REQ_DISCARD) {
		bv_len = bio->bi_size;

		while (bv_len > 0) {
			tgt = rsxx_get_dma_tgt(card, addr8);
			laddr = rsxx_addr8_to_laddr(addr8, card);

			st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
						cb, cb_data);
			if (st)
				goto bvec_err;

			dma_cnt[tgt]++;
			atomic_inc(n_dmas);
			addr8 += RSXX_HW_BLK_SIZE;
			bv_len -= RSXX_HW_BLK_SIZE;
		}
	} else {
		bio_for_each_segment(bvec, bio, i) {
			bv_len = bvec->bv_len;
			bv_off = bvec->bv_offset;

			while (bv_len > 0) {
				tgt = rsxx_get_dma_tgt(card, addr8);
				laddr = rsxx_addr8_to_laddr(addr8, card);
				dma_off = addr8 & RSXX_HW_BLK_MASK;
				dma_len = min(bv_len,
					      RSXX_HW_BLK_SIZE - dma_off);

				st = rsxx_queue_dma(card, &dma_list[tgt],
						    bio_data_dir(bio),
						    dma_off, dma_len,
						    laddr, bvec->bv_page,
						    bv_off, cb, cb_data);
				if (st)
					goto bvec_err;

				dma_cnt[tgt]++;
				atomic_inc(n_dmas);
				addr8 += dma_len;
				bv_off += dma_len;
				bv_len -= dma_len;
			}
		}
	}

	for (i = 0; i < card->n_targets; i++) {
		if (!list_empty(&dma_list[i])) {
			spin_lock(&card->ctrl[i].queue_lock);
			card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
			list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
			spin_unlock(&card->ctrl[i].queue_lock);

			queue_work(card->ctrl[i].issue_wq,
				   &card->ctrl[i].issue_dma_work);
		}
	}

	return 0;

bvec_err:
	for (i = 0; i < card->n_targets; i++)
		rsxx_cleanup_dma_queue(card, &dma_list[i]);

	return st;
}


/*----------------- DMA Engine Initialization & Setup -------------------*/
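/*
 * Per-channel initialization: allocate the PCI-consistent status and
 * command rings, build the tracker free list, create the issue/done
 * workqueues, and program the ring base addresses and starting indices
 * into the channel's register window. Partially completed allocations are
 * left for the caller (rsxx_dma_setup) to unwind on failure.
 */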
static int rsxx_dma_ctrl_init(struct pci_dev *dev,
			      struct rsxx_dma_ctrl *ctrl)
{
	int i;

	memset(&ctrl->stats, 0, sizeof(ctrl->stats));

	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
						&ctrl->status.dma_addr);
	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
					     &ctrl->cmd.dma_addr);
	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
		return -ENOMEM;

	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
	if (!ctrl->trackers)
		return -ENOMEM;

	ctrl->trackers->head = 0;
	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
		ctrl->trackers->list[i].next_tag = i + 1;
		ctrl->trackers->list[i].dma = NULL;
	}
	ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
	spin_lock_init(&ctrl->trackers->lock);

	spin_lock_init(&ctrl->queue_lock);
	INIT_LIST_HEAD(&ctrl->queue);

	setup_timer(&ctrl->activity_timer, dma_engine_stalled,
		    (unsigned long)ctrl);

	ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
	if (!ctrl->issue_wq)
		return -ENOMEM;

	ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
	if (!ctrl->done_wq)
		return -ENOMEM;

	INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
	INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);

	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_HI);

	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
			 ctrl->status.idx);
		return -EINVAL;
	}
	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
			 ctrl->cmd.idx);
		return -EINVAL;
	}
	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);

	wmb();

	return 0;
}

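/*
 * Derive the shift/mask values used to stripe byte addresses across the DMA
 * targets. As a purely illustrative example, with a 4096-byte stripe and 4
 * targets: lower_mask = 0xfff, upper_shift = 2, target_shift = 12 and
 * target_mask = 0x3, so byte address 0x5000 maps to target
 * (0x5000 >> 12) & 0x3 = 1 and local byte offset
 * ((0x5000 >> 2) & ~0xfff) | (0x5000 & 0xfff) = 0x1000, before the final
 * division by RSXX_HW_BLK_SIZE in rsxx_addr8_to_laddr().
 */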
int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
			  unsigned int stripe_size8)
{
	if (!is_power_of_2(stripe_size8)) {
		dev_err(CARD_TO_DEV(card),
			"stripe_size is NOT a power of 2!\n");
		return -EINVAL;
	}

	card->_stripe.lower_mask = stripe_size8 - 1;

	card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
	card->_stripe.upper_shift = ffs(card->n_targets) - 1;

	card->_stripe.target_mask = card->n_targets - 1;
	card->_stripe.target_shift = ffs(stripe_size8) - 1;

	dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
		card->_stripe.lower_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
		card->_stripe.upper_shift);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
		card->_stripe.upper_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
		card->_stripe.target_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
		card->_stripe.target_shift);

	return 0;
}

int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
	u32 intr_coal;

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      card->config.data.intr_coal.count,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);

	return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
}

int rsxx_dma_setup(struct rsxx_cardinfo *card)
{
	unsigned long flags;
	int st;
	int i;

	dev_info(CARD_TO_DEV(card),
		 "Initializing %d DMA targets\n",
		 card->n_targets);

	/* Regmap is divided up into 4K chunks. One for each DMA channel */
	for (i = 0; i < card->n_targets; i++)
		card->ctrl[i].regmap = card->regmap + (i * 4096);

	card->dma_fault = 0;

	/* Reset the DMA queues */
	rsxx_dma_queue_reset(card);

	/************* Setup DMA Control *************/
	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
		if (st)
			goto failed_dma_setup;

		card->ctrl[i].card = card;
		card->ctrl[i].id = i;
	}

	card->scrub_hard = 1;

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Enable the interrupts after all setup has completed. */
	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	return 0;

failed_dma_setup:
	for (i = 0; i < card->n_targets; i++) {
		struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		if (ctrl->trackers)
			vfree(ctrl->trackers);

		if (ctrl->status.buf)
			pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
					    ctrl->status.buf,
					    ctrl->status.dma_addr);
		if (ctrl->cmd.buf)
			pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
					    ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}

	return st;
}


void rsxx_dma_destroy(struct rsxx_cardinfo *card)
{
	struct rsxx_dma_ctrl *ctrl;
	struct rsxx_dma *dma;
	int i, j;
	int cnt = 0;

	for (i = 0; i < card->n_targets; i++) {
		ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		if (timer_pending(&ctrl->activity_timer))
			del_timer_sync(&ctrl->activity_timer);

		/* Clean up the DMA queue */
		spin_lock(&ctrl->queue_lock);
		cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
		spin_unlock(&ctrl->queue_lock);

		if (cnt)
			dev_info(CARD_TO_DEV(card),
				 "Freed %d queued DMAs on channel %d\n",
				 cnt, i);

		/* Clean up issued DMAs */
		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
			dma = get_tracker_dma(ctrl->trackers, j);
			if (dma) {
				pci_unmap_page(card->dev, dma->dma_addr,
					       get_dma_size(dma),
					       (dma->cmd == HW_CMD_BLK_WRITE) ?
					       PCI_DMA_TODEVICE :
					       PCI_DMA_FROMDEVICE);
				kmem_cache_free(rsxx_dma_pool, dma);
				cnt++;
			}
		}

		if (cnt)
			dev_info(CARD_TO_DEV(card),
				 "Freed %d pending DMAs on channel %d\n",
				 cnt, i);

		vfree(ctrl->trackers);

		pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
				    ctrl->status.buf, ctrl->status.dma_addr);
		pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
				    ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}
}


int rsxx_dma_init(void)
{
	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
	if (!rsxx_dma_pool)
		return -ENOMEM;

	return 0;
}


void rsxx_dma_cleanup(void)
{
	kmem_cache_destroy(rsxx_dma_pool);
}