]>
Commit | Line | Data |
---|---|---|
e2f91578 OJ |
1 | /* |
2 | * Copyright (C) 2010 NVIDIA Corporation. | |
3 | * Copyright (C) 2010 Google, Inc. | |
4 | * | |
5 | * This software is licensed under the terms of the GNU General Public | |
6 | * License version 2, as published by the Free Software Foundation, and | |
7 | * may be copied, distributed, and modified under those terms. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | */ | |
15 | ||
16 | #include <linux/kernel.h> | |
17 | #include <linux/io.h> | |
b861c275 | 18 | #include <linux/of.h> |
5b39fc0b | 19 | #include <linux/dmaengine.h> |
e2f91578 OJ |
20 | #include <linux/dma-mapping.h> |
21 | #include <linux/spinlock.h> | |
22 | #include <linux/completion.h> | |
23 | #include <linux/sched.h> | |
24 | #include <linux/mutex.h> | |
25 | ||
e2f91578 | 26 | #include "apbio.h" |
2be39c07 | 27 | #include "iomap.h" |
e2f91578 | 28 | |
6a2473c5 | 29 | #if defined(CONFIG_TEGRA20_APB_DMA) |
e2f91578 | 30 | static DEFINE_MUTEX(tegra_apb_dma_lock); |
e2f91578 OJ |
31 | static u32 *tegra_apb_bb; |
32 | static dma_addr_t tegra_apb_bb_phys; | |
33 | static DECLARE_COMPLETION(tegra_apb_wait); | |
34 | ||
b861c275 LD |
35 | static u32 tegra_apb_readl_direct(unsigned long offset); |
36 | static void tegra_apb_writel_direct(u32 value, unsigned long offset); | |
37 | ||
5b39fc0b LD |
38 | static struct dma_chan *tegra_apb_dma_chan; |
39 | static struct dma_slave_config dma_sconfig; | |
40 | ||
/*
 * Lazily set up the DMA channel and one-word bounce buffer used for
 * DMA-based APB register access.
 *
 * Serialized by tegra_apb_dma_lock, so concurrent first accesses race
 * safely.  Returns true when the channel and bounce buffer are ready,
 * false on failure (callers then fall back to direct MMIO access).
 */
static bool tegra_apb_dma_init(void)
{
	dma_cap_mask_t mask;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma_chan)
		goto skip_init;

	/* Any slave-capable channel will do; no filter function needed. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!tegra_apb_dma_chan) {
		/*
		 * This is common until the device is probed, so don't
		 * shout about it.
		 */
		pr_debug("%s: can not allocate dma channel\n", __func__);
		goto err_dma_alloc;
	}

	/*
	 * NOTE(review): a NULL struct device is passed to
	 * dma_alloc_coherent(); modern kernels require a real device for
	 * coherent allocations — confirm against the target kernel version.
	 */
	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		goto err_buff_alloc;
	}

	/* Single 32-bit word per transfer, matching the bounce buffer. */
	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.src_maxburst = 1;
	dma_sconfig.dst_maxburst = 1;

skip_init:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

err_buff_alloc:
	dma_release_channel(tegra_apb_dma_chan);
	tegra_apb_dma_chan = NULL;

err_dma_alloc:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}
87 | ||
/*
 * DMA-engine completion callback: wakes the waiter blocked in
 * do_dma_transfer().  @args is unused (callback_param is NULL).
 */
static void apb_dma_complete(void *args)
{
	complete(&tegra_apb_wait);
}
92 | ||
93 | static int do_dma_transfer(unsigned long apb_add, | |
94 | enum dma_transfer_direction dir) | |
95 | { | |
96 | struct dma_async_tx_descriptor *dma_desc; | |
97 | int ret; | |
98 | ||
99 | if (dir == DMA_DEV_TO_MEM) | |
100 | dma_sconfig.src_addr = apb_add; | |
101 | else | |
102 | dma_sconfig.dst_addr = apb_add; | |
103 | ||
104 | ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig); | |
105 | if (ret) | |
106 | return ret; | |
107 | ||
108 | dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan, | |
109 | tegra_apb_bb_phys, sizeof(u32), dir, | |
110 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | |
111 | if (!dma_desc) | |
112 | return -EINVAL; | |
113 | ||
114 | dma_desc->callback = apb_dma_complete; | |
115 | dma_desc->callback_param = NULL; | |
116 | ||
16735d02 | 117 | reinit_completion(&tegra_apb_wait); |
5b39fc0b LD |
118 | |
119 | dmaengine_submit(dma_desc); | |
120 | dma_async_issue_pending(tegra_apb_dma_chan); | |
121 | ret = wait_for_completion_timeout(&tegra_apb_wait, | |
122 | msecs_to_jiffies(50)); | |
123 | ||
124 | if (WARN(ret == 0, "apb read dma timed out")) { | |
125 | dmaengine_terminate_all(tegra_apb_dma_chan); | |
126 | return -EFAULT; | |
127 | } | |
128 | return 0; | |
129 | } | |
130 | ||
131 | static u32 tegra_apb_readl_using_dma(unsigned long offset) | |
132 | { | |
133 | int ret; | |
134 | ||
135 | if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) | |
136 | return tegra_apb_readl_direct(offset); | |
137 | ||
138 | mutex_lock(&tegra_apb_dma_lock); | |
139 | ret = do_dma_transfer(offset, DMA_DEV_TO_MEM); | |
140 | if (ret < 0) { | |
141 | pr_err("error in reading offset 0x%08lx using dma\n", offset); | |
142 | *(u32 *)tegra_apb_bb = 0; | |
143 | } | |
144 | mutex_unlock(&tegra_apb_dma_lock); | |
145 | return *((u32 *)tegra_apb_bb); | |
146 | } | |
147 | ||
148 | static void tegra_apb_writel_using_dma(u32 value, unsigned long offset) | |
149 | { | |
150 | int ret; | |
151 | ||
152 | if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) { | |
153 | tegra_apb_writel_direct(value, offset); | |
154 | return; | |
155 | } | |
156 | ||
157 | mutex_lock(&tegra_apb_dma_lock); | |
158 | *((u32 *)tegra_apb_bb) = value; | |
159 | ret = do_dma_transfer(offset, DMA_MEM_TO_DEV); | |
160 | if (ret < 0) | |
161 | pr_err("error in writing offset 0x%08lx using dma\n", offset); | |
162 | mutex_unlock(&tegra_apb_dma_lock); | |
163 | } | |
b861c275 LD |
164 | #else |
165 | #define tegra_apb_readl_using_dma tegra_apb_readl_direct | |
166 | #define tegra_apb_writel_using_dma tegra_apb_writel_direct | |
167 | #endif | |
168 | ||
169 | typedef u32 (*apbio_read_fptr)(unsigned long offset); | |
170 | typedef void (*apbio_write_fptr)(u32 value, unsigned long offset); | |
171 | ||
172 | static apbio_read_fptr apbio_read; | |
173 | static apbio_write_fptr apbio_write; | |
174 | ||
175 | static u32 tegra_apb_readl_direct(unsigned long offset) | |
176 | { | |
f8e798a9 | 177 | return readl(IO_ADDRESS(offset)); |
b861c275 LD |
178 | } |
179 | ||
180 | static void tegra_apb_writel_direct(u32 value, unsigned long offset) | |
181 | { | |
f8e798a9 | 182 | writel(value, IO_ADDRESS(offset)); |
b861c275 LD |
183 | } |
184 | ||
185 | void tegra_apb_io_init(void) | |
186 | { | |
187 | /* Need to use dma only when it is Tegra20 based platform */ | |
188 | if (of_machine_is_compatible("nvidia,tegra20") || | |
189 | !of_have_populated_dt()) { | |
190 | apbio_read = tegra_apb_readl_using_dma; | |
191 | apbio_write = tegra_apb_writel_using_dma; | |
192 | } else { | |
193 | apbio_read = tegra_apb_readl_direct; | |
194 | apbio_write = tegra_apb_writel_direct; | |
195 | } | |
196 | } | |
197 | ||
/*
 * Public APB read accessor; dispatches to the variant selected by
 * tegra_apb_io_init().
 * NOTE(review): apbio_read is NULL until tegra_apb_io_init() runs —
 * confirm all callers execute after init.
 */
u32 tegra_apb_readl(unsigned long offset)
{
	return apbio_read(offset);
}
202 | ||
/*
 * Public APB write accessor; dispatches to the variant selected by
 * tegra_apb_io_init().
 * NOTE(review): apbio_write is NULL until tegra_apb_io_init() runs —
 * confirm all callers execute after init.
 */
void tegra_apb_writel(u32 value, unsigned long offset)
{
	apbio_write(value, offset);
}