/*
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * Broadcom SBA RAID Driver
 *
 * The Broadcom stream buffer accelerator (SBA) provides offloading
 * capabilities for RAID operations. The SBA offload engine is accessible
 * via the Broadcom SoC specific ring manager. Two or more offload engines
 * can share the same Broadcom SoC specific ring manager, so the ring
 * manager driver is implemented as a mailbox controller driver and the
 * offload engine drivers are implemented as mailbox clients.
 *
 * Typically, a Broadcom SoC specific ring manager implements a large
 * number of hardware rings over one or more SBA hardware devices. By
 * design, the internal buffer size of an SBA hardware device is limited,
 * but all offload operations supported by SBA can be broken down into
 * multiple small-size requests and executed in parallel on multiple SBA
 * hardware devices to achieve high throughput.
 *
 * The Broadcom SBA RAID driver does not require any register programming
 * except submitting requests to the SBA hardware device via mailbox
 * channels. This driver implements a DMA device with one DMA channel
 * using a single mailbox channel provided by the Broadcom SoC specific
 * ring manager driver. To get more SBA DMA channels, we can create more
 * SBA device nodes in the Broadcom SoC specific DTS based on the number
 * of hardware rings supported by the Broadcom SoC ring manager.
 */
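
/*
 * Request lifecycle overview (derived from the code below, using the
 * list names from struct sba_device):
 *
 *   reqs_free_list    --sba_alloc_request()-------------> reqs_alloc_list
 *   reqs_alloc_list   --sba_tx_submit()-----------------> reqs_pending_list
 *   reqs_pending_list --_sba_process_pending_requests()-> reqs_active_list
 *   reqs_active_list  --sba_receive_message()-----------> reqs_free_list
 *
 * Large operations are split into multiple small requests chained via
 * sba_chain_request(); the whole chain is freed only after the last
 * chained request is received back (tracked by first->next_pending_count).
 */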
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>

#include "dmaengine.h"
/* ====== Driver macros and defines ===== */

#define SBA_TYPE_SHIFT				48
#define SBA_TYPE_MASK				GENMASK(1, 0)
#define SBA_TYPE_A				0x0
#define SBA_TYPE_B				0x2
#define SBA_TYPE_C				0x3
#define SBA_USER_DEF_SHIFT			32
#define SBA_USER_DEF_MASK			GENMASK(15, 0)
#define SBA_R_MDATA_SHIFT			24
#define SBA_R_MDATA_MASK			GENMASK(7, 0)
#define SBA_C_MDATA_MS_SHIFT			18
#define SBA_C_MDATA_MS_MASK			GENMASK(1, 0)
#define SBA_INT_SHIFT				17
#define SBA_INT_MASK				BIT(0)
#define SBA_RESP_SHIFT				16
#define SBA_RESP_MASK				BIT(0)
#define SBA_C_MDATA_SHIFT			8
#define SBA_C_MDATA_MASK			GENMASK(7, 0)
#define SBA_C_MDATA_BNUMx_SHIFT(__bnum)		(2 * (__bnum))
#define SBA_C_MDATA_BNUMx_MASK			GENMASK(1, 0)
#define SBA_C_MDATA_DNUM_SHIFT			5
#define SBA_C_MDATA_DNUM_MASK			GENMASK(4, 0)
#define SBA_C_MDATA_LS(__v)			((__v) & 0xff)
#define SBA_C_MDATA_MS(__v)			(((__v) >> 8) & 0x3)
#define SBA_CMD_SHIFT				0
#define SBA_CMD_MASK				GENMASK(3, 0)
#define SBA_CMD_ZERO_BUFFER			0x4
#define SBA_CMD_ZERO_ALL_BUFFERS		0x8
#define SBA_CMD_LOAD_BUFFER			0x9
#define SBA_CMD_XOR				0xa
#define SBA_CMD_GALOIS_XOR			0xb
#define SBA_CMD_WRITE_BUFFER			0xc
#define SBA_CMD_GALOIS				0xe

#define SBA_MAX_REQ_PER_MBOX_CHANNEL		8192
#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL	8
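
/*
 * For orientation, the SHIFT/MASK pairs above imply the following layout
 * of a 64-bit SBA command word as packed by sba_cmd_enc() below (derived
 * from the defines, not from hardware documentation):
 *
 *   [49:48] type          (SBA_TYPE_A/B/C)
 *   [47:32] user defined  (length/size field)
 *   [31:24] r_mdata
 *   [19:18] c_mdata MS bits
 *   [17]    interrupt flag
 *   [16]    response flag
 *   [15:8]  c_mdata LS bits
 *   [3:0]   command opcode (e.g. SBA_CMD_LOAD_BUFFER)
 */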
/* Driver helper macros */
#define to_sba_request(tx)		\
	container_of(tx, struct sba_request, tx)
#define to_sba_device(dchan)		\
	container_of(dchan, struct sba_device, dma_chan)
/* ===== Driver data structures ===== */

enum sba_request_flags {
	SBA_REQUEST_STATE_FREE		= 0x001,
	SBA_REQUEST_STATE_ALLOCED	= 0x002,
	SBA_REQUEST_STATE_PENDING	= 0x004,
	SBA_REQUEST_STATE_ACTIVE	= 0x008,
	SBA_REQUEST_STATE_ABORTED	= 0x010,
	SBA_REQUEST_STATE_MASK		= 0x0ff,
	SBA_REQUEST_FENCE		= 0x100,
};
struct sba_request {
	/* Global state */
	struct list_head node;
	struct sba_device *sba;
	u32 flags;
	/* Chained requests management */
	struct sba_request *first;
	struct list_head next;
	atomic_t next_pending_count;
	/* BRCM message data */
	struct brcm_message msg;
	struct dma_async_tx_descriptor tx;
	/* SBA commands */
	struct brcm_sba_command cmds[0];
};
struct sba_device {
	/* Underlying device */
	struct device *dev;
	/* DT configuration parameters */
	enum sba_version ver;
	/* Derived configuration parameters */
	u32 max_req;
	u32 hw_buf_size;
	u32 hw_resp_size;
	u32 max_pq_coefs;
	u32 max_pq_srcs;
	u32 max_xor_srcs;
	u32 max_cmd_per_req;
	u32 max_resp_pool_size;
	u32 max_cmds_pool_size;
	/* Mailbox client and Mailbox channels */
	struct mbox_client client;
	struct mbox_chan *mchan;
	struct device *mbox_dev;
	/* DMA device and DMA channel */
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	/* DMA channel resources */
	void *resp_base;
	dma_addr_t resp_dma_base;
	void *cmds_base;
	dma_addr_t cmds_dma_base;
	spinlock_t reqs_lock;
	bool reqs_fence;
	struct list_head reqs_alloc_list;
	struct list_head reqs_pending_list;
	struct list_head reqs_active_list;
	struct list_head reqs_aborted_list;
	struct list_head reqs_free_list;
	/* DebugFS directory entries */
	struct dentry *root;
	struct dentry *stats;
};
/* ====== Command helper routines ===== */

static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
	cmd &= ~((u64)mask << shift);
	cmd |= ((u64)(val & mask) << shift);
	return cmd;
}
static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
}

static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
	       ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}
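
/*
 * Illustration of c_mdata packing (derived from the helpers above):
 * buffer numbers occupy 2-bit fields and the Galois coefficient, when
 * present, occupies 5 bits starting at bit 5. For example,
 * sba_cmd_pq_c_mdata(d, 1, 0) evaluates to 0x4 | ((d & 0x1f) << 5);
 * bits [7:0] are folded into the command word via SBA_C_MDATA_LS() and
 * bits [9:8] via SBA_C_MDATA_MS().
 */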
/* ====== General helper routines ===== */

static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
	bool found = false;
	unsigned long flags;
	struct sba_request *req = NULL;

	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(req, &sba->reqs_free_list, node) {
		if (async_tx_test_ack(&req->tx)) {
			list_move_tail(&req->node, &sba->reqs_alloc_list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	if (!found) {
		/*
		 * We have no more free requests, so we peek the
		 * mailbox channel hoping that a few active requests
		 * have completed, which would create more room for
		 * new requests.
		 */
		mbox_client_peek_data(sba->mchan);
		return NULL;
	}

	req->flags = SBA_REQUEST_STATE_ALLOCED;
	req->first = req;
	INIT_LIST_HEAD(&req->next);
	atomic_set(&req->next_pending_count, 1);

	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	async_tx_ack(&req->tx);

	return req;
}
/* Note: Must be called with sba->reqs_lock held */
static void _sba_pending_request(struct sba_device *sba,
				 struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_PENDING;
	list_move_tail(&req->node, &sba->reqs_pending_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}
/* Note: Must be called with sba->reqs_lock held */
static bool _sba_active_request(struct sba_device *sba,
				struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
	if (sba->reqs_fence)
		return false;
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ACTIVE;
	list_move_tail(&req->node, &sba->reqs_active_list);
	if (req->flags & SBA_REQUEST_FENCE)
		sba->reqs_fence = true;
	return true;
}
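
/*
 * Note on fencing: once a request carrying SBA_REQUEST_FENCE becomes
 * active, sba->reqs_fence stays true until the active list drains, so
 * _sba_active_request() refuses to activate further requests and they
 * remain on the pending list. This is how chained P/Q sub-requests
 * submitted with DMA_PREP_FENCE are kept in order.
 */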
/* Note: Must be called with sba->reqs_lock held */
static void _sba_abort_request(struct sba_device *sba,
			       struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ABORTED;
	list_move_tail(&req->node, &sba->reqs_aborted_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}
/* Note: Must be called with sba->reqs_lock held */
static void _sba_free_request(struct sba_device *sba,
			      struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_FREE;
	list_move_tail(&req->node, &sba->reqs_free_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}
static void sba_free_chained_requests(struct sba_request *req)
{
	unsigned long flags;
	struct sba_request *nreq;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	_sba_free_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_free_request(sba, nreq);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_chain_request(struct sba_request *first,
			      struct sba_request *req)
{
	unsigned long flags;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_add_tail(&req->next, &first->next);
	req->first = first;
	atomic_inc(&first->next_pending_count);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_cleanup_nonpending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all allocated requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
		_sba_free_request(sba, req);

	/* Set all active requests as aborted */
	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
		_sba_abort_request(sba, req);

	/*
	 * Note: We expect that aborted requests will eventually be
	 * freed by sba_receive_message()
	 */

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_cleanup_pending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all pending requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
		_sba_free_request(sba, req);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
{
	int ret = 0;

	/* Send message for the request */
	req->msg.error = 0;
	ret = mbox_send_message(sba->mchan, &req->msg);
	if (ret < 0) {
		dev_err(sba->dev, "send message failed with error %d", ret);
		return ret;
	}

	/* Check error returned by mailbox controller */
	ret = req->msg.error;
	if (ret < 0) {
		dev_err(sba->dev, "message error %d", ret);
		return ret;
	}

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(sba->mchan, ret);

	return ret;
}
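
/*
 * Completion model: the mailbox client is configured in sba_probe() with
 * tx_block = false and knows_txdone = true, so transmit completion is
 * acked here via mbox_client_txdone() right after a successful
 * mbox_send_message(), while the actual operation completion arrives
 * asynchronously through sba_receive_message().
 */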
/* Note: Must be called with sba->reqs_lock held */
static void _sba_process_pending_requests(struct sba_device *sba)
{
	int ret;
	u32 count;
	struct sba_request *req;

	/* Process a few pending requests */
	count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
	while (!list_empty(&sba->reqs_pending_list) && count) {
		/* Get the first pending request */
		req = list_first_entry(&sba->reqs_pending_list,
				       struct sba_request, node);

		/* Try to make request active */
		if (!_sba_active_request(sba, req))
			break;

		/* Send request to mailbox channel */
		ret = sba_send_mbox_request(sba, req);
		if (ret < 0) {
			_sba_pending_request(sba, req);
			break;
		}

		count--;
	}
}
static void sba_process_received_request(struct sba_device *sba,
					 struct sba_request *req)
{
	unsigned long flags;
	struct dma_async_tx_descriptor *tx;
	struct sba_request *nreq, *first = req->first;

	/* Process only after all chained requests are received */
	if (!atomic_dec_return(&first->next_pending_count)) {
		tx = &first->tx;

		WARN_ON(tx->cookie < 0);
		if (tx->cookie > 0) {
			spin_lock_irqsave(&sba->reqs_lock, flags);
			dma_cookie_complete(tx);
			spin_unlock_irqrestore(&sba->reqs_lock, flags);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			dma_descriptor_unmap(tx);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		dma_run_dependencies(tx);

		spin_lock_irqsave(&sba->reqs_lock, flags);

		/* Free all requests chained to first request */
		list_for_each_entry(nreq, &first->next, next)
			_sba_free_request(sba, nreq);
		INIT_LIST_HEAD(&first->next);

		/* Free the first request */
		_sba_free_request(sba, first);

		/* Process pending requests */
		_sba_process_pending_requests(sba);

		spin_unlock_irqrestore(&sba->reqs_lock, flags);
	}
}
static void sba_write_stats_in_seqfile(struct sba_device *sba,
				       struct seq_file *file)
{
	unsigned long flags;
	struct sba_request *req;
	u32 free_count = 0, alloced_count = 0;
	u32 pending_count = 0, active_count = 0, aborted_count = 0;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_for_each_entry(req, &sba->reqs_free_list, node)
		if (async_tx_test_ack(&req->tx))
			free_count++;

	list_for_each_entry(req, &sba->reqs_alloc_list, node)
		alloced_count++;

	list_for_each_entry(req, &sba->reqs_pending_list, node)
		pending_count++;

	list_for_each_entry(req, &sba->reqs_active_list, node)
		active_count++;

	list_for_each_entry(req, &sba->reqs_aborted_list, node)
		aborted_count++;

	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	seq_printf(file, "maximum requests = %d\n", sba->max_req);
	seq_printf(file, "free requests    = %d\n", free_count);
	seq_printf(file, "alloced requests = %d\n", alloced_count);
	seq_printf(file, "pending requests = %d\n", pending_count);
	seq_printf(file, "active requests  = %d\n", active_count);
	seq_printf(file, "aborted requests = %d\n", aborted_count);
}
/* ====== DMAENGINE callbacks ===== */

static void sba_free_chan_resources(struct dma_chan *dchan)
{
	/*
	 * Channel resources are pre-allocated, so we just free up
	 * whatever we can so that we can reuse the pre-allocated
	 * channel resources next time.
	 */
	sba_cleanup_nonpending_requests(to_sba_device(dchan));
}

static int sba_device_terminate_all(struct dma_chan *dchan)
{
	/* Cleanup all pending requests */
	sba_cleanup_pending_requests(to_sba_device(dchan));

	return 0;
}

static void sba_issue_pending(struct dma_chan *dchan)
{
	unsigned long flags;
	struct sba_device *sba = to_sba_device(dchan);

	/* Process pending requests */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	_sba_process_pending_requests(sba);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	dma_cookie_t cookie;
	struct sba_device *sba;
	struct sba_request *req, *nreq;

	if (unlikely(!tx))
		return -EINVAL;

	sba = to_sba_device(tx->chan);
	req = to_sba_request(tx);

	/* Assign cookie and mark all chained requests pending */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	cookie = dma_cookie_assign(tx);
	_sba_pending_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_pending_request(sba, nreq);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return cookie;
}
static enum dma_status sba_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;
	struct sba_device *sba = to_sba_device(dchan);

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	mbox_client_peek_data(sba->mchan);

	return dma_cookie_status(dchan, cookie, txstate);
}
static void sba_fillup_interrupt_msg(struct sba_request *req,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load dummy data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Type-A command to write buf0 to dummy location */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1, SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
static struct dma_async_tx_descriptor *
sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct sba_request *req = NULL;
	struct sba_device *sba = to_sba_device(dchan);

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;

	/*
	 * Force fence so that no requests are submitted
	 * until DMA callback for this request is invoked.
	 */
	req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request message */
	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return &req->tx;
}
static void sba_fillup_memcpy_msg(struct sba_request *req,
				  struct brcm_sba_command *cmds,
				  struct brcm_message *msg,
				  dma_addr_t msg_offset, size_t msg_len,
				  dma_addr_t dst, dma_addr_t src)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1, SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
static struct sba_request *
sba_prep_dma_memcpy_req(struct sba_device *sba,
			dma_addr_t off, dma_addr_t dst, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request message */
	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
			      off, len, dst, src);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
					      req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
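
/*
 * Worked example (hypothetical sizes, for illustration only): with
 * hw_buf_size = 4096, a 10000-byte memcpy becomes three chained requests
 * of 4096, 4096 and 1808 bytes. Only the first request's descriptor is
 * returned to the client; sba_tx_submit() marks the whole chain pending
 * and sba_process_received_request() completes the cookie once all three
 * requests have been received back.
 */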
static void sba_fillup_xor_msg(struct sba_request *req,
			       struct brcm_sba_command *cmds,
			       struct brcm_message *msg,
			       dma_addr_t msg_offset, size_t msg_len,
			       dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	u32 i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src[0] + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-B commands to xor data with buf0 and put it back in buf0 */
	for (i = 1; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1, SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
		     dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
		     u32 src_cnt, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request message */
	sba_fillup_xor_msg(req, req->cmds, &req->msg,
			   off, len, dst, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static struct dma_async_tx_descriptor *
sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
		 u32 src_cnt, size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_xor_srcs))
		return NULL;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
					   req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
static void sba_fillup_pq_msg(struct sba_request *req,
			      bool pq_continue,
			      struct brcm_sba_command *cmds,
			      struct brcm_message *msg,
			      dma_addr_t msg_offset, size_t msg_len,
			      dma_addr_t *dst_p, dma_addr_t *dst_q,
			      const u8 *scf, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	u32 i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		if (dst_p) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(0);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_p + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}

		/* Type-B command to load old Q into buf1 */
		if (dst_q) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(1);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_q + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}
	} else {
		/* Type-A command to zero all buffers */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;
	}

	/* Type-B commands to generate P onto buf0 and Q onto buf1 */
	for (i = 0; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	if (dst_p) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1, SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf1 */
	if (dst_q) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1, SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
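
/*
 * Background note: in the message built above, buf0 accumulates P (plain
 * XOR of the sources) and buf1 accumulates Q, with each source scaled by
 * its RAID-6 coefficient scf[i] expressed as a Galois-field exponent via
 * raid6_gflog[] and applied by SBA_CMD_GALOIS_XOR.
 */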
static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
		    dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
		    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request messages */
	sba_fillup_pq_msg(req, dmaf_continue(flags),
			  req->cmds, &req->msg,
			  off, len, dst_p, dst_q, scf, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static void sba_fillup_pq_single_msg(struct sba_request *req,
				     bool pq_continue,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg,
				     dma_addr_t msg_offset, size_t msg_len,
				     dma_addr_t *dst_p, dma_addr_t *dst_q,
				     dma_addr_t src, u8 scf)
{
	u64 cmd;
	u32 c_mdata;
	u8 pos, dpos = raid6_gflog[scf];
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (!dst_p)
		goto skip_p;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;

		/*
		 * Type-B command to xor data with buf0 and put it
		 * back in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	} else {
		/* Type-B command to load data into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1, SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_p + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_p:
	if (!dst_q)
		goto skip_q;

	/* Type-A command to zero all buffers */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	cmdsp++;

	if (dpos == 255)
		goto skip_q_computation;
	pos = (dpos < req->sba->max_pq_coefs) ?
		dpos : (req->sba->max_pq_coefs - 1);

	/*
	 * Type-B command to generate initial Q from data
	 * and store output into buf0
	 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
			  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS, SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	dpos -= pos;

	/* Multiple Type-A commands to generate final Q */
	while (dpos) {
		pos = (dpos < req->sba->max_pq_coefs) ?
			dpos : (req->sba->max_pq_coefs - 1);

		/*
		 * Type-A command to generate Q with buf0 and
		 * buf1 and store result in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;

		dpos -= pos;
	}

skip_q_computation:
	if (pq_continue) {
		/*
		 * Type-B command to XOR previous output with
		 * buf0 and write it into buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, SBA_CMD_SHIFT, SBA_CMD_MASK);
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1, SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_q + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_q:
	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
			   dma_addr_t *dst_p, dma_addr_t *dst_q,
			   dma_addr_t src, u8 scf, size_t len,
			   unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request messages */
	sba_fillup_pq_single_msg(req, dmaf_continue(flags),
				 req->cmds, &req->msg, off, len,
				 dst_p, dst_q, src, scf);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static struct dma_async_tx_descriptor *
sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
		u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	u32 i, dst_q_index;
	size_t req_len;
	bool slow = false;
	dma_addr_t off = 0;
	dma_addr_t *dst_p = NULL, *dst_q = NULL;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_pq_srcs))
		return NULL;
	for (i = 0; i < src_cnt; i++)
		if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
			slow = true;

	/* Figure-out P and Q destination addresses */
	if (!(flags & DMA_PREP_PQ_DISABLE_P))
		dst_p = &dst[0];
	if (!(flags & DMA_PREP_PQ_DISABLE_Q))
		dst_q = &dst[1];

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		if (slow) {
			dst_q_index = src_cnt;

			if (dst_q) {
				for (i = 0; i < src_cnt; i++) {
					if (*dst_q == src[i]) {
						dst_q_index = i;
						break;
					}
				}
			}

			if (dst_q_index < src_cnt) {
				i = dst_q_index;
				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}

			for (i = 0; i < src_cnt; i++) {
				if (dst_q_index == i)
					continue;

				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}
		} else {
			req = sba_prep_dma_pq_req(sba, off,
						  dst_p, dst_q, src, src_cnt,
						  scf, req_len, flags);
			if (!req)
				goto fail;

			if (first)
				sba_chain_request(first, req);
			else
				first = req;
		}

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;

fail:
	if (first)
		sba_free_chained_requests(first);
	return NULL;
}
/* ====== Mailbox callbacks ===== */

static void sba_receive_message(struct mbox_client *cl, void *msg)
{
	struct brcm_message *m = msg;
	struct sba_request *req = m->ctx;
	struct sba_device *sba = req->sba;

	/* Report error if message has error */
	if (m->error < 0)
		dev_err(sba->dev, "%s got message with error %d",
			dma_chan_name(&sba->dma_chan), m->error);

	/* Process received request */
	sba_process_received_request(sba, req);
}
/* ====== Debugfs callbacks ====== */

static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
	struct platform_device *pdev = to_platform_device(file->private);
	struct sba_device *sba = platform_get_drvdata(pdev);

	/* Write stats in file */
	sba_write_stats_in_seqfile(sba, file);

	return 0;
}
/* ====== Platform driver routines ===== */

static int sba_prealloc_channel_resources(struct sba_device *sba)
{
	int i, j, ret = 0;
	struct sba_request *req = NULL;

	sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_resp_pool_size,
					    &sba->resp_dma_base, GFP_KERNEL);
	if (!sba->resp_base)
		return -ENOMEM;

	sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_cmds_pool_size,
					    &sba->cmds_dma_base, GFP_KERNEL);
	if (!sba->cmds_base) {
		ret = -ENOMEM;
		goto fail_free_resp_pool;
	}

	spin_lock_init(&sba->reqs_lock);
	sba->reqs_fence = false;
	INIT_LIST_HEAD(&sba->reqs_alloc_list);
	INIT_LIST_HEAD(&sba->reqs_pending_list);
	INIT_LIST_HEAD(&sba->reqs_active_list);
	INIT_LIST_HEAD(&sba->reqs_aborted_list);
	INIT_LIST_HEAD(&sba->reqs_free_list);

	for (i = 0; i < sba->max_req; i++) {
		req = devm_kzalloc(sba->dev,
				   sizeof(*req) +
				   sba->max_cmd_per_req * sizeof(req->cmds[0]),
				   GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto fail_free_cmds_pool;
		}
		INIT_LIST_HEAD(&req->node);
		req->sba = sba;
		req->flags = SBA_REQUEST_STATE_FREE;
		INIT_LIST_HEAD(&req->next);
		atomic_set(&req->next_pending_count, 0);
		for (j = 0; j < sba->max_cmd_per_req; j++) {
			req->cmds[j].cmd = 0;
			req->cmds[j].cmd_dma = sba->cmds_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].flags = 0;
		}
		memset(&req->msg, 0, sizeof(req->msg));
		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
		async_tx_ack(&req->tx);
		req->tx.tx_submit = sba_tx_submit;
		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
		list_add_tail(&req->node, &sba->reqs_free_list);
	}

	return 0;

fail_free_cmds_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	return ret;
}
static void sba_freeup_channel_resources(struct sba_device *sba)
{
	dmaengine_terminate_all(&sba->dma_chan);
	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	sba->resp_base = NULL;
	sba->resp_dma_base = 0;
}
static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set mailbox channel device as the base device of
	 * our dma_device because the actual memory accesses
	 * will be done by the mailbox controller.
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base prep routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}
static int sba_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of mailbox channels should be at least 1 */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived Configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We can support max_pq_srcs == max_pq_coefs, but we are
		 * limited by the number of SBA commands that we can fit
		 * in one message for the underlying ring manager HW.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);
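	/*
	 * Illustrative arithmetic (not extra configuration): for SBA_VER_2
	 * the above gives max_pq_srcs = 12, so max_cmd_per_req = 15 and
	 * max_xor_srcs = 14. With max_req = 8192 and hw_resp_size = 8,
	 * max_resp_pool_size = 64 KiB and max_cmds_pool_size =
	 * 8192 * 15 * 8 = 960 KiB of coherent memory per SBA DMA channel.
	 */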
	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = true;
	sba->client.tx_tout = 0;

	/* Request mailbox channel */
	sba->mchan = mbox_request_channel(&sba->client, 0);
	if (IS_ERR(sba->mchan)) {
		ret = PTR_ERR(sba->mchan);
		goto fail_free_mchan;
	}

	/* Find-out underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchan;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchan;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* Prealloc channel resource */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchan;

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
	if (IS_ERR_OR_NULL(sba->root)) {
		dev_err(sba->dev, "failed to create debugfs root entry\n");
		sba->root = NULL;
		goto skip_debugfs;
	}

	/* Create debugfs stats entry */
	sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
						 sba_debugfs_stats_show);
	if (IS_ERR_OR_NULL(sba->stats))
		dev_err(sba->dev, "failed to create debugfs stats file\n");

skip_debugfs:

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 dev_name(sba->mbox_dev));

	return 0;

fail_free_resources:
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
fail_free_mchan:
	mbox_free_channel(sba->mchan);
	return ret;
}
static int sba_remove(struct platform_device *pdev)
{
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	debugfs_remove_recursive(sba->root);

	sba_freeup_channel_resources(sba);

	mbox_free_channel(sba->mchan);

	return 0;
}
static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{ },
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);
MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");