]>
Commit | Line | Data |
---|---|---|
4078f575 EA |
1 | /* |
2 | * Copyright (C) 2016 Broadcom | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of the GNU General Public License version 2 as published by | |
6 | * the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program. If not, see <http://www.gnu.org/licenses/>. | |
15 | */ | |
16 | ||
17 | /** | |
18 | * DOC: VC4 DSI0/DSI1 module | |
19 | * | |
20 | * BCM2835 contains two DSI modules, DSI0 and DSI1. DSI0 is a | |
21 | * single-lane DSI controller, while DSI1 is a more modern 4-lane DSI | |
22 | * controller. | |
23 | * | |
24 | * Most Raspberry Pi boards expose DSI1 as their "DISPLAY" connector, | |
25 | * while the compute module brings both DSI0 and DSI1 out. | |
26 | * | |
27 | * This driver has been tested for DSI1 video-mode display only | |
28 | * currently, with most of the information necessary for DSI0 | |
29 | * hopefully present. | |
30 | */ | |
31 | ||
32 | #include "drm_atomic_helper.h" | |
33 | #include "drm_crtc_helper.h" | |
34 | #include "drm_edid.h" | |
35 | #include "drm_mipi_dsi.h" | |
36 | #include "drm_panel.h" | |
37 | #include "linux/clk.h" | |
38 | #include "linux/clk-provider.h" | |
39 | #include "linux/completion.h" | |
40 | #include "linux/component.h" | |
41 | #include "linux/dmaengine.h" | |
42 | #include "linux/i2c.h" | |
43 | #include "linux/of_address.h" | |
44 | #include "linux/of_platform.h" | |
45 | #include "linux/pm_runtime.h" | |
46 | #include "vc4_drv.h" | |
47 | #include "vc4_regs.h" | |
48 | ||
/* FIFO geometry for the packet transmitter: entries in the command
 * FIFO, and entries x bytes-per-entry in the pixel FIFO.
 */
#define DSI_CMD_FIFO_DEPTH  16
#define DSI_PIX_FIFO_DEPTH 256
#define DSI_PIX_FIFO_WIDTH   4

#define DSI0_CTRL		0x00

/* Command packet control. */
#define DSI0_TXPKT1C		0x04 /* AKA PKTC */
#define DSI1_TXPKT1C		0x04
# define DSI_TXPKT1C_TRIG_CMD_MASK	VC4_MASK(31, 24)
# define DSI_TXPKT1C_TRIG_CMD_SHIFT	24
# define DSI_TXPKT1C_CMD_REPEAT_MASK	VC4_MASK(23, 10)
# define DSI_TXPKT1C_CMD_REPEAT_SHIFT	10

# define DSI_TXPKT1C_DISPLAY_NO_MASK	VC4_MASK(9, 8)
# define DSI_TXPKT1C_DISPLAY_NO_SHIFT	8
/* Short, trigger, BTA, or a long packet that fits all in CMDFIFO. */
# define DSI_TXPKT1C_DISPLAY_NO_SHORT	0
/* Primary display where cmdfifo provides part of the payload and
 * pixelvalve the rest.
 */
# define DSI_TXPKT1C_DISPLAY_NO_PRIMARY	1
/* Secondary display where cmdfifo provides part of the payload and
 * pixfifo the rest.
 */
# define DSI_TXPKT1C_DISPLAY_NO_SECONDARY 2

# define DSI_TXPKT1C_CMD_TX_TIME_MASK	VC4_MASK(7, 6)
# define DSI_TXPKT1C_CMD_TX_TIME_SHIFT	6

# define DSI_TXPKT1C_CMD_CTRL_MASK	VC4_MASK(5, 4)
# define DSI_TXPKT1C_CMD_CTRL_SHIFT	4
/* Command only.  Uses TXPKT1H and DISPLAY_NO */
# define DSI_TXPKT1C_CMD_CTRL_TX	0
/* Command with BTA for either ack or read data. */
# define DSI_TXPKT1C_CMD_CTRL_RX	1
/* Trigger according to TRIG_CMD */
# define DSI_TXPKT1C_CMD_CTRL_TRIG	2
/* BTA alone for getting error status after a command, or a TE trigger
 * without a previous command.
 */
# define DSI_TXPKT1C_CMD_CTRL_BTA	3

# define DSI_TXPKT1C_CMD_MODE_LP	BIT(3)
# define DSI_TXPKT1C_CMD_TYPE_LONG	BIT(2)
# define DSI_TXPKT1C_CMD_TE_EN		BIT(1)
# define DSI_TXPKT1C_CMD_EN		BIT(0)

/* Command packet header (data type + byte counts of the packet to send). */
#define DSI0_TXPKT1H		0x08 /* AKA PKTH */
#define DSI1_TXPKT1H		0x08
# define DSI_TXPKT1H_BC_CMDFIFO_MASK	VC4_MASK(31, 24)
# define DSI_TXPKT1H_BC_CMDFIFO_SHIFT	24
# define DSI_TXPKT1H_BC_PARAM_MASK	VC4_MASK(23, 8)
# define DSI_TXPKT1H_BC_PARAM_SHIFT	8
# define DSI_TXPKT1H_BC_DT_MASK		VC4_MASK(7, 0)
# define DSI_TXPKT1H_BC_DT_SHIFT	0

/* Received packet header: error flags plus either a short-packet
 * payload or the byte count of a long packet sitting in RXPKT_FIFO.
 */
#define DSI0_RXPKT1H		0x0c /* AKA RX1_PKTH */
#define DSI1_RXPKT1H		0x14
# define DSI_RXPKT1H_CRC_ERR		BIT(31)
# define DSI_RXPKT1H_DET_ERR		BIT(30)
# define DSI_RXPKT1H_ECC_ERR		BIT(29)
# define DSI_RXPKT1H_COR_ERR		BIT(28)
# define DSI_RXPKT1H_INCOMP_PKT		BIT(25)
# define DSI_RXPKT1H_PKT_TYPE_LONG	BIT(24)
/* Byte count if DSI_RXPKT1H_PKT_TYPE_LONG */
# define DSI_RXPKT1H_BC_PARAM_MASK	VC4_MASK(23, 8)
# define DSI_RXPKT1H_BC_PARAM_SHIFT	8
/* Short return bytes if !DSI_RXPKT1H_PKT_TYPE_LONG */
# define DSI_RXPKT1H_SHORT_1_MASK	VC4_MASK(23, 16)
# define DSI_RXPKT1H_SHORT_1_SHIFT	16
# define DSI_RXPKT1H_SHORT_0_MASK	VC4_MASK(15, 8)
# define DSI_RXPKT1H_SHORT_0_SHIFT	8
# define DSI_RXPKT1H_DT_LP_CMD_MASK	VC4_MASK(7, 0)
# define DSI_RXPKT1H_DT_LP_CMD_SHIFT	0
/* Second received-packet header register.
 *
 * NOTE(review): the bit definitions below re-use the DSI_RXPKT1H_*
 * prefix (with values identical to the RXPKT1H block where the names
 * collide) instead of a DSI_RXPKT2H_* prefix -- looks like a
 * copy-paste from the block above; confirm intended naming before
 * adding users of these macros.
 */
#define DSI0_RXPKT2H		0x10 /* AKA RX2_PKTH */
#define DSI1_RXPKT2H		0x18
# define DSI_RXPKT1H_DET_ERR		BIT(30)
# define DSI_RXPKT1H_ECC_ERR		BIT(29)
# define DSI_RXPKT1H_COR_ERR		BIT(28)
# define DSI_RXPKT1H_INCOMP_PKT		BIT(25)
# define DSI_RXPKT1H_BC_PARAM_MASK	VC4_MASK(23, 8)
# define DSI_RXPKT1H_BC_PARAM_SHIFT	8
# define DSI_RXPKT1H_DT_MASK		VC4_MASK(7, 0)
# define DSI_RXPKT1H_DT_SHIFT		0

/* Write port for feeding command payload bytes into the command FIFO. */
#define DSI0_TXPKT_CMD_FIFO	0x14 /* AKA CMD_DATAF */
#define DSI1_TXPKT_CMD_FIFO	0x1c

#define DSI0_DISP0_CTRL		0x18
# define DSI_DISP0_PIX_CLK_DIV_MASK	VC4_MASK(21, 13)
# define DSI_DISP0_PIX_CLK_DIV_SHIFT	13
# define DSI_DISP0_LP_STOP_CTRL_MASK	VC4_MASK(12, 11)
# define DSI_DISP0_LP_STOP_CTRL_SHIFT	11
# define DSI_DISP0_LP_STOP_DISABLE	0
# define DSI_DISP0_LP_STOP_PERLINE	1
# define DSI_DISP0_LP_STOP_PERFRAME	2

/* Transmit RGB pixels and null packets only during HACTIVE, instead
 * of going to LP-STOP.
 */
# define DSI_DISP_HACTIVE_NULL		BIT(10)
/* Transmit blanking packet only during vblank, instead of allowing LP-STOP. */
# define DSI_DISP_VBLP_CTRL		BIT(9)
/* Transmit blanking packet only during HFP, instead of allowing LP-STOP. */
# define DSI_DISP_HFP_CTRL		BIT(8)
/* Transmit blanking packet only during HBP, instead of allowing LP-STOP. */
# define DSI_DISP_HBP_CTRL		BIT(7)
# define DSI_DISP0_CHANNEL_MASK		VC4_MASK(6, 5)
# define DSI_DISP0_CHANNEL_SHIFT	5
/* Enables end events for HSYNC/VSYNC, not just start events. */
# define DSI_DISP0_ST_END		BIT(4)
# define DSI_DISP0_PFORMAT_MASK		VC4_MASK(3, 2)
# define DSI_DISP0_PFORMAT_SHIFT	2
# define DSI_PFORMAT_RGB565		0
# define DSI_PFORMAT_RGB666_PACKED	1
# define DSI_PFORMAT_RGB666		2
# define DSI_PFORMAT_RGB888		3
/* Default is VIDEO mode. */
# define DSI_DISP0_COMMAND_MODE		BIT(1)
# define DSI_DISP0_ENABLE		BIT(0)

#define DSI0_DISP1_CTRL		0x1c
#define DSI1_DISP1_CTRL		0x2c
/* Format of the data written to TXPKT_PIX_FIFO. */
# define DSI_DISP1_PFORMAT_MASK		VC4_MASK(2, 1)
# define DSI_DISP1_PFORMAT_SHIFT	1
# define DSI_DISP1_PFORMAT_16BIT	0
# define DSI_DISP1_PFORMAT_24BIT	1
# define DSI_DISP1_PFORMAT_32BIT_LE	2
# define DSI_DISP1_PFORMAT_32BIT_BE	3

/* DISP1 is always command mode. */
# define DSI_DISP1_ENABLE		BIT(0)

#define DSI0_TXPKT_PIX_FIFO	0x20 /* AKA PIX_FIFO */

/* Interrupt status/enable.  Note that only the DSI1 bit layout is
 * defined here (DSI1_INT_*); the DSI0 register offsets are 0x24/0x28.
 */
#define DSI0_INT_STAT		0x24
#define DSI0_INT_EN		0x28
# define DSI1_INT_PHY_D3_ULPS		BIT(30)
# define DSI1_INT_PHY_D3_STOP		BIT(29)
# define DSI1_INT_PHY_D2_ULPS		BIT(28)
# define DSI1_INT_PHY_D2_STOP		BIT(27)
# define DSI1_INT_PHY_D1_ULPS		BIT(26)
# define DSI1_INT_PHY_D1_STOP		BIT(25)
# define DSI1_INT_PHY_D0_ULPS		BIT(24)
# define DSI1_INT_PHY_D0_STOP		BIT(23)
# define DSI1_INT_FIFO_ERR		BIT(22)
# define DSI1_INT_PHY_DIR_RTF		BIT(21)
# define DSI1_INT_PHY_RXLPDT		BIT(20)
# define DSI1_INT_PHY_RXTRIG		BIT(19)
# define DSI1_INT_PHY_D0_LPDT		BIT(18)
# define DSI1_INT_PHY_DIR_FTR		BIT(17)

/* Signaled when the clock lane enters the given state. */
# define DSI1_INT_PHY_CLOCK_ULPS	BIT(16)
# define DSI1_INT_PHY_CLOCK_HS		BIT(15)
# define DSI1_INT_PHY_CLOCK_STOP	BIT(14)

/* Signaled on timeouts */
# define DSI1_INT_PR_TO			BIT(13)
# define DSI1_INT_TA_TO			BIT(12)
# define DSI1_INT_LPRX_TO		BIT(11)
# define DSI1_INT_HSTX_TO		BIT(10)

/* Contention on a line when trying to drive the line low */
# define DSI1_INT_ERR_CONT_LP1		BIT(9)
# define DSI1_INT_ERR_CONT_LP0		BIT(8)

/* Control error: incorrect line state sequence on data lane 0. */
# define DSI1_INT_ERR_CONTROL		BIT(7)
/* LPDT synchronization error (bits received not a multiple of 8). */
# define DSI1_INT_ERR_SYNC_ESC		BIT(6)
/* Signaled after receiving an error packet from the display in
 * response to a read.
 */
# define DSI1_INT_RXPKT2		BIT(5)
/* Signaled after receiving a packet.  The header and optional short
 * response will be in RXPKT1H, and a long response will be in the
 * RXPKT_FIFO.
 */
# define DSI1_INT_RXPKT1		BIT(4)
# define DSI1_INT_TXPKT2_DONE		BIT(3)
# define DSI1_INT_TXPKT2_END		BIT(2)
/* Signaled after all repeats of TXPKT1 are transferred. */
# define DSI1_INT_TXPKT1_DONE		BIT(1)
/* Signaled after each TXPKT1 repeat is scheduled. */
# define DSI1_INT_TXPKT1_END		BIT(0)

/* Error and timeout interrupts that stay enabled for the life of the
 * device so problems get logged even outside of a transfer.
 */
#define DSI1_INTERRUPTS_ALWAYS_ENABLED	(DSI1_INT_ERR_SYNC_ESC | \
					 DSI1_INT_ERR_CONTROL |	 \
					 DSI1_INT_ERR_CONT_LP0 | \
					 DSI1_INT_ERR_CONT_LP1 | \
					 DSI1_INT_HSTX_TO |	 \
					 DSI1_INT_LPRX_TO |	 \
					 DSI1_INT_TA_TO |	 \
					 DSI1_INT_PR_TO)

#define DSI0_STAT		0x2c
#define DSI0_HSTX_TO_CNT	0x30
#define DSI0_LPRX_TO_CNT	0x34
#define DSI0_TA_TO_CNT		0x38
#define DSI0_PR_TO_CNT		0x3c
/* PHY control: lane enables and ULPS requests.  DSI0- and
 * DSI1-specific bit positions are interleaved below; unprefixed
 * DSI_PHYC_* bits are at the same position on both ports.
 */
#define DSI0_PHYC		0x40
# define DSI1_PHYC_ESC_CLK_LPDT_MASK	VC4_MASK(25, 20)
# define DSI1_PHYC_ESC_CLK_LPDT_SHIFT	20
# define DSI1_PHYC_HS_CLK_CONTINUOUS	BIT(18)
# define DSI0_PHYC_ESC_CLK_LPDT_MASK	VC4_MASK(17, 12)
# define DSI0_PHYC_ESC_CLK_LPDT_SHIFT	12
# define DSI1_PHYC_CLANE_ULPS		BIT(17)
# define DSI1_PHYC_CLANE_ENABLE		BIT(16)
# define DSI_PHYC_DLANE3_ULPS		BIT(13)
# define DSI_PHYC_DLANE3_ENABLE		BIT(12)
# define DSI0_PHYC_HS_CLK_CONTINUOUS	BIT(10)
# define DSI0_PHYC_CLANE_ULPS		BIT(9)
# define DSI_PHYC_DLANE2_ULPS		BIT(9)
# define DSI0_PHYC_CLANE_ENABLE		BIT(8)
# define DSI_PHYC_DLANE2_ENABLE		BIT(8)
# define DSI_PHYC_DLANE1_ULPS		BIT(5)
# define DSI_PHYC_DLANE1_ENABLE		BIT(4)
# define DSI_PHYC_DLANE0_FORCE_STOP	BIT(2)
# define DSI_PHYC_DLANE0_ULPS		BIT(1)
# define DSI_PHYC_DLANE0_ENABLE		BIT(0)

/* DSI0 high-speed clock/data lane timing registers (field layouts in
 * the DSI1_HS_* definitions below).
 */
#define DSI0_HS_CLT0		0x44
#define DSI0_HS_CLT1		0x48
#define DSI0_HS_CLT2		0x4c
#define DSI0_HS_DLT3		0x50
#define DSI0_HS_DLT4		0x54
#define DSI0_HS_DLT5		0x58
#define DSI0_HS_DLT6		0x5c
#define DSI0_HS_DLT7		0x60
284 | ||
/* Analog front-end control 0: power-down, reset, and per-lane bias
 * (IDR) / analog control (ACTRL) fields.  DSI0 and DSI1 lay this
 * register out differently, hence the interleaved prefixes.
 */
#define DSI0_PHY_AFEC0		0x64
# define DSI0_PHY_AFEC0_DDR2CLK_EN		BIT(26)
# define DSI0_PHY_AFEC0_DDRCLK_EN		BIT(25)
# define DSI0_PHY_AFEC0_LATCH_ULPS		BIT(24)
# define DSI1_PHY_AFEC0_IDR_DLANE3_MASK		VC4_MASK(31, 29)
# define DSI1_PHY_AFEC0_IDR_DLANE3_SHIFT	29
# define DSI1_PHY_AFEC0_IDR_DLANE2_MASK		VC4_MASK(28, 26)
# define DSI1_PHY_AFEC0_IDR_DLANE2_SHIFT	26
/* Fixed: each DSI1 IDR field is 3 bits wide (DLANE3=31:29,
 * DLANE2=28:26, DLANE1=25:23, DLANE0=22:20, CLANE=19:17), matching
 * the SHIFT of 23 below.  The previous VC4_MASK(27, 23) was 5 bits
 * wide and overlapped the DLANE2 field.
 */
# define DSI1_PHY_AFEC0_IDR_DLANE1_MASK		VC4_MASK(25, 23)
# define DSI1_PHY_AFEC0_IDR_DLANE1_SHIFT	23
# define DSI1_PHY_AFEC0_IDR_DLANE0_MASK		VC4_MASK(22, 20)
# define DSI1_PHY_AFEC0_IDR_DLANE0_SHIFT	20
# define DSI1_PHY_AFEC0_IDR_CLANE_MASK		VC4_MASK(19, 17)
# define DSI1_PHY_AFEC0_IDR_CLANE_SHIFT		17
# define DSI0_PHY_AFEC0_ACTRL_DLANE1_MASK	VC4_MASK(23, 20)
# define DSI0_PHY_AFEC0_ACTRL_DLANE1_SHIFT	20
# define DSI0_PHY_AFEC0_ACTRL_DLANE0_MASK	VC4_MASK(19, 16)
# define DSI0_PHY_AFEC0_ACTRL_DLANE0_SHIFT	16
# define DSI0_PHY_AFEC0_ACTRL_CLANE_MASK	VC4_MASK(15, 12)
# define DSI0_PHY_AFEC0_ACTRL_CLANE_SHIFT	12
# define DSI1_PHY_AFEC0_DDR2CLK_EN		BIT(16)
# define DSI1_PHY_AFEC0_DDRCLK_EN		BIT(15)
# define DSI1_PHY_AFEC0_LATCH_ULPS		BIT(14)
# define DSI1_PHY_AFEC0_RESET			BIT(13)
# define DSI1_PHY_AFEC0_PD			BIT(12)
# define DSI0_PHY_AFEC0_RESET			BIT(11)
# define DSI1_PHY_AFEC0_PD_BG			BIT(11)
# define DSI0_PHY_AFEC0_PD			BIT(10)
# define DSI1_PHY_AFEC0_PD_DLANE3		BIT(10)
# define DSI0_PHY_AFEC0_PD_BG			BIT(9)
# define DSI1_PHY_AFEC0_PD_DLANE2		BIT(9)
# define DSI0_PHY_AFEC0_PD_DLANE1		BIT(8)
# define DSI1_PHY_AFEC0_PD_DLANE1		BIT(8)
# define DSI_PHY_AFEC0_PTATADJ_MASK		VC4_MASK(7, 4)
# define DSI_PHY_AFEC0_PTATADJ_SHIFT		4
# define DSI_PHY_AFEC0_CTATADJ_MASK		VC4_MASK(3, 0)
# define DSI_PHY_AFEC0_CTATADJ_SHIFT		0

/* Analog front-end control 1 (DSI0 layout: per-lane IDR fields). */
#define DSI0_PHY_AFEC1		0x68
# define DSI0_PHY_AFEC1_IDR_DLANE1_MASK		VC4_MASK(10, 8)
# define DSI0_PHY_AFEC1_IDR_DLANE1_SHIFT	8
# define DSI0_PHY_AFEC1_IDR_DLANE0_MASK		VC4_MASK(6, 4)
# define DSI0_PHY_AFEC1_IDR_DLANE0_SHIFT	4
# define DSI0_PHY_AFEC1_IDR_CLANE_MASK		VC4_MASK(2, 0)
# define DSI0_PHY_AFEC1_IDR_CLANE_SHIFT		0
330 | ||
#define DSI0_TST_SEL		0x6c
#define DSI0_TST_MON		0x70
#define DSI0_ID			0x74
/* "dsi" in little-endian ASCII, returned by the ID register. */
# define DSI_ID_VALUE		0x00647369

#define DSI1_CTRL		0x00
# define DSI_CTRL_HS_CLKC_MASK		VC4_MASK(15, 14)
# define DSI_CTRL_HS_CLKC_SHIFT		14
# define DSI_CTRL_HS_CLKC_BYTE		0
# define DSI_CTRL_HS_CLKC_DDR2		1
# define DSI_CTRL_HS_CLKC_DDR		2

# define DSI_CTRL_RX_LPDT_EOT_DISABLE	BIT(13)
# define DSI_CTRL_LPDT_EOT_DISABLE	BIT(12)
# define DSI_CTRL_HSDT_EOT_DISABLE	BIT(11)
# define DSI_CTRL_SOFT_RESET_CFG	BIT(10)
# define DSI_CTRL_CAL_BYTE		BIT(9)
# define DSI_CTRL_INV_BYTE		BIT(8)
/* FIFO-clear bits; DSI0 and DSI1 assign different FIFOs to the same
 * bit positions, hence the per-port names below.
 */
# define DSI_CTRL_CLR_LDF		BIT(7)
# define DSI0_CTRL_CLR_PBCF		BIT(6)
# define DSI1_CTRL_CLR_RXF		BIT(6)
# define DSI0_CTRL_CLR_CPBCF		BIT(5)
# define DSI1_CTRL_CLR_PDF		BIT(5)
# define DSI0_CTRL_CLR_PDF		BIT(4)
# define DSI1_CTRL_CLR_CDF		BIT(4)
# define DSI0_CTRL_CLR_CDF		BIT(3)
# define DSI0_CTRL_CTRL2		BIT(2)
# define DSI1_CTRL_DISABLE_DISP_CRCC	BIT(2)
# define DSI0_CTRL_CTRL1		BIT(1)
# define DSI1_CTRL_DISABLE_DISP_ECCC	BIT(1)
# define DSI0_CTRL_CTRL0		BIT(0)
# define DSI1_CTRL_EN			BIT(0)
/* Convenience masks clearing every FIFO on the given port. */
# define DSI0_CTRL_RESET_FIFOS		(DSI_CTRL_CLR_LDF | \
					 DSI0_CTRL_CLR_PBCF | \
					 DSI0_CTRL_CLR_CPBCF | \
					 DSI0_CTRL_CLR_PDF | \
					 DSI0_CTRL_CLR_CDF)
# define DSI1_CTRL_RESET_FIFOS		(DSI_CTRL_CLR_LDF | \
					 DSI1_CTRL_CLR_RXF | \
					 DSI1_CTRL_CLR_PDF | \
					 DSI1_CTRL_CLR_CDF)

#define DSI1_TXPKT2C		0x0c
#define DSI1_TXPKT2H		0x10
#define DSI1_TXPKT_PIX_FIFO	0x20
#define DSI1_RXPKT_FIFO		0x24
#define DSI1_DISP0_CTRL		0x28
#define DSI1_INT_STAT		0x30
#define DSI1_INT_EN		0x34
/* State reporting bits.  These mostly behave like INT_STAT, where
 * writing a 1 clears the bit.
 */
#define DSI1_STAT		0x38
# define DSI1_STAT_PHY_D3_ULPS		BIT(31)
# define DSI1_STAT_PHY_D3_STOP		BIT(30)
# define DSI1_STAT_PHY_D2_ULPS		BIT(29)
# define DSI1_STAT_PHY_D2_STOP		BIT(28)
# define DSI1_STAT_PHY_D1_ULPS		BIT(27)
# define DSI1_STAT_PHY_D1_STOP		BIT(26)
# define DSI1_STAT_PHY_D0_ULPS		BIT(25)
# define DSI1_STAT_PHY_D0_STOP		BIT(24)
# define DSI1_STAT_FIFO_ERR		BIT(23)
# define DSI1_STAT_PHY_RXLPDT		BIT(22)
# define DSI1_STAT_PHY_RXTRIG		BIT(21)
# define DSI1_STAT_PHY_D0_LPDT		BIT(20)
/* Set when in forward direction */
# define DSI1_STAT_PHY_DIR		BIT(19)
# define DSI1_STAT_PHY_CLOCK_ULPS	BIT(18)
# define DSI1_STAT_PHY_CLOCK_HS		BIT(17)
# define DSI1_STAT_PHY_CLOCK_STOP	BIT(16)
# define DSI1_STAT_PR_TO		BIT(15)
# define DSI1_STAT_TA_TO		BIT(14)
# define DSI1_STAT_LPRX_TO		BIT(13)
# define DSI1_STAT_HSTX_TO		BIT(12)
# define DSI1_STAT_ERR_CONT_LP1		BIT(11)
# define DSI1_STAT_ERR_CONT_LP0		BIT(10)
# define DSI1_STAT_ERR_CONTROL		BIT(9)
# define DSI1_STAT_ERR_SYNC_ESC		BIT(8)
# define DSI1_STAT_RXPKT2		BIT(7)
# define DSI1_STAT_RXPKT1		BIT(6)
# define DSI1_STAT_TXPKT2_BUSY		BIT(5)
# define DSI1_STAT_TXPKT2_DONE		BIT(4)
# define DSI1_STAT_TXPKT2_END		BIT(3)
# define DSI1_STAT_TXPKT1_BUSY		BIT(2)
# define DSI1_STAT_TXPKT1_DONE		BIT(1)
# define DSI1_STAT_TXPKT1_END		BIT(0)

#define DSI1_HSTX_TO_CNT	0x3c
#define DSI1_LPRX_TO_CNT	0x40
#define DSI1_TA_TO_CNT		0x44
#define DSI1_PR_TO_CNT		0x48
#define DSI1_PHYC		0x4c

/* High-speed clock-lane timing (counts of byte/esc clock periods). */
#define DSI1_HS_CLT0		0x50
# define DSI_HS_CLT0_CZERO_MASK		VC4_MASK(26, 18)
# define DSI_HS_CLT0_CZERO_SHIFT	18
# define DSI_HS_CLT0_CPRE_MASK		VC4_MASK(17, 9)
# define DSI_HS_CLT0_CPRE_SHIFT		9
# define DSI_HS_CLT0_CPREP_MASK		VC4_MASK(8, 0)
# define DSI_HS_CLT0_CPREP_SHIFT	0

#define DSI1_HS_CLT1		0x54
# define DSI_HS_CLT1_CTRAIL_MASK	VC4_MASK(17, 9)
# define DSI_HS_CLT1_CTRAIL_SHIFT	9
# define DSI_HS_CLT1_CPOST_MASK		VC4_MASK(8, 0)
# define DSI_HS_CLT1_CPOST_SHIFT	0

#define DSI1_HS_CLT2		0x58
# define DSI_HS_CLT2_WUP_MASK		VC4_MASK(23, 0)
# define DSI_HS_CLT2_WUP_SHIFT		0

/* High-speed data-lane timing. */
#define DSI1_HS_DLT3		0x5c
# define DSI_HS_DLT3_EXIT_MASK		VC4_MASK(26, 18)
# define DSI_HS_DLT3_EXIT_SHIFT		18
# define DSI_HS_DLT3_ZERO_MASK		VC4_MASK(17, 9)
# define DSI_HS_DLT3_ZERO_SHIFT		9
# define DSI_HS_DLT3_PRE_MASK		VC4_MASK(8, 0)
# define DSI_HS_DLT3_PRE_SHIFT		0

#define DSI1_HS_DLT4		0x60
# define DSI_HS_DLT4_ANLAT_MASK		VC4_MASK(22, 18)
# define DSI_HS_DLT4_ANLAT_SHIFT	18
# define DSI_HS_DLT4_TRAIL_MASK		VC4_MASK(17, 9)
# define DSI_HS_DLT4_TRAIL_SHIFT	9
# define DSI_HS_DLT4_LPX_MASK		VC4_MASK(8, 0)
# define DSI_HS_DLT4_LPX_SHIFT		0

#define DSI1_HS_DLT5		0x64
# define DSI_HS_DLT5_INIT_MASK		VC4_MASK(23, 0)
# define DSI_HS_DLT5_INIT_SHIFT		0

#define DSI1_HS_DLT6		0x68
# define DSI_HS_DLT6_TA_GET_MASK	VC4_MASK(31, 24)
# define DSI_HS_DLT6_TA_GET_SHIFT	24
# define DSI_HS_DLT6_TA_SURE_MASK	VC4_MASK(23, 16)
# define DSI_HS_DLT6_TA_SURE_SHIFT	16
# define DSI_HS_DLT6_TA_GO_MASK		VC4_MASK(15, 8)
# define DSI_HS_DLT6_TA_GO_SHIFT	8
# define DSI_HS_DLT6_LP_LPX_MASK	VC4_MASK(7, 0)
# define DSI_HS_DLT6_LP_LPX_SHIFT	0

#define DSI1_HS_DLT7		0x6c
# define DSI_HS_DLT7_LP_WUP_MASK	VC4_MASK(23, 0)
# define DSI_HS_DLT7_LP_WUP_SHIFT	0

#define DSI1_PHY_AFEC0		0x70

#define DSI1_PHY_AFEC1		0x74
# define DSI1_PHY_AFEC1_ACTRL_DLANE3_MASK	VC4_MASK(19, 16)
# define DSI1_PHY_AFEC1_ACTRL_DLANE3_SHIFT	16
# define DSI1_PHY_AFEC1_ACTRL_DLANE2_MASK	VC4_MASK(15, 12)
# define DSI1_PHY_AFEC1_ACTRL_DLANE2_SHIFT	12
# define DSI1_PHY_AFEC1_ACTRL_DLANE1_MASK	VC4_MASK(11, 8)
# define DSI1_PHY_AFEC1_ACTRL_DLANE1_SHIFT	8
# define DSI1_PHY_AFEC1_ACTRL_DLANE0_MASK	VC4_MASK(7, 4)
# define DSI1_PHY_AFEC1_ACTRL_DLANE0_SHIFT	4
# define DSI1_PHY_AFEC1_ACTRL_CLANE_MASK	VC4_MASK(3, 0)
# define DSI1_PHY_AFEC1_ACTRL_CLANE_SHIFT	0

#define DSI1_TST_SEL		0x78
#define DSI1_TST_MON		0x7c
#define DSI1_PHY_TST1		0x80
#define DSI1_PHY_TST2		0x84
#define DSI1_PHY_FIFO_STAT	0x88
/* Actually, all registers in the range that aren't otherwise claimed
 * will return the ID.
 */
#define DSI1_ID			0x8c
499 | ||
/* General DSI hardware state. */
struct vc4_dsi {
	struct platform_device *pdev;

	/* MIPI DSI host we expose for panel drivers to attach to. */
	struct mipi_dsi_host dsi_host;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	/* Panel bound through the DSI host attach path; NULL until then. */
	struct drm_panel *panel;

	/* Mapped register window for this DSI instance. */
	void __iomem *regs;

	/* DMA channel and bounce buffer used by
	 * dsi_dma_workaround_write() to perform register writes via
	 * DMA instead of CPU MMIO.  reg_dma_chan == NULL means plain
	 * writel() is used.
	 */
	struct dma_chan *reg_dma_chan;
	dma_addr_t reg_dma_paddr;
	u32 *reg_dma_mem;
	/* Bus address of the register window (DMA destination base). */
	dma_addr_t reg_paddr;

	/* Whether we're on bcm2835's DSI0 or DSI1. */
	int port;

	/* DSI channel for the panel we're connected to. */
	u32 channel;
	u32 lanes;
	enum mipi_dsi_pixel_format format;
	/* MIPI_DSI_MODE_* flags reported by the attached device. */
	u32 mode_flags;

	/* Input clock from CPRMAN to the digital PHY, for the DSI
	 * escape clock.
	 */
	struct clk *escape_clock;

	/* Input clock to the analog PHY, used to generate the DSI bit
	 * clock.
	 */
	struct clk *pll_phy_clock;

	/* HS Clocks generated within the DSI analog PHY. */
	struct clk_fixed_factor phy_clocks[3];

	struct clk_hw_onecell_data *clk_onecell;

	/* Pixel clock output to the pixelvalve, generated from the HS
	 * clock.
	 */
	struct clk *pixel_clock;

	/* Completion and result code for a command transfer in
	 * flight.
	 */
	struct completion xfer_completion;
	int xfer_result;
};

/* Map a mipi_dsi_host back to its containing vc4_dsi. */
#define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host)
550 | ||
551 | static inline void | |
552 | dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val) | |
553 | { | |
554 | struct dma_chan *chan = dsi->reg_dma_chan; | |
555 | struct dma_async_tx_descriptor *tx; | |
556 | dma_cookie_t cookie; | |
557 | int ret; | |
558 | ||
559 | /* DSI0 should be able to write normally. */ | |
560 | if (!chan) { | |
561 | writel(val, dsi->regs + offset); | |
562 | return; | |
563 | } | |
564 | ||
565 | *dsi->reg_dma_mem = val; | |
566 | ||
567 | tx = chan->device->device_prep_dma_memcpy(chan, | |
568 | dsi->reg_paddr + offset, | |
569 | dsi->reg_dma_paddr, | |
570 | 4, 0); | |
571 | if (!tx) { | |
572 | DRM_ERROR("Failed to set up DMA register write\n"); | |
573 | return; | |
574 | } | |
575 | ||
576 | cookie = tx->tx_submit(tx); | |
577 | ret = dma_submit_error(cookie); | |
578 | if (ret) { | |
579 | DRM_ERROR("Failed to submit DMA: %d\n", ret); | |
580 | return; | |
581 | } | |
582 | ret = dma_sync_wait(chan, cookie); | |
583 | if (ret) | |
584 | DRM_ERROR("Failed to wait for DMA: %d\n", ret); | |
585 | } | |
586 | ||
/* Register accessors.  All writes go through the DMA workaround
 * helper (which falls back to writel() when no DMA channel is set
 * up).  The PORT variants paste together the DSI0_ or DSI1_ register
 * name based on which instance "dsi" is.
 */
#define DSI_READ(offset) readl(dsi->regs + (offset))
#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
#define DSI_PORT_READ(offset) \
	DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset)
#define DSI_PORT_WRITE(offset, val) \
	DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val)
#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit)
594 | ||
/* VC4 DSI encoder KMS struct: wraps the shared vc4_encoder with a
 * back-pointer to the owning DSI instance.
 */
struct vc4_dsi_encoder {
	struct vc4_encoder base;
	struct vc4_dsi *dsi;
};

/* Recover the vc4_dsi_encoder wrapper from a drm_encoder pointer. */
static inline struct vc4_dsi_encoder *
to_vc4_dsi_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_dsi_encoder, base.base);
}

/* VC4 DSI connector KMS struct: drm_connector plus a back-pointer to
 * the owning DSI instance.
 */
struct vc4_dsi_connector {
	struct drm_connector base;
	struct vc4_dsi *dsi;
};

/* Recover the vc4_dsi_connector wrapper from a drm_connector pointer. */
static inline struct vc4_dsi_connector *
to_vc4_dsi_connector(struct drm_connector *connector)
{
	return container_of(connector, struct vc4_dsi_connector, base);
}
618 | ||
/* Offset/name pairs for register dumping (debugfs and error paths). */
#define DSI_REG(reg) { reg, #reg }
static const struct {
	u32 reg;
	const char *name;
} dsi0_regs[] = {
	DSI_REG(DSI0_CTRL),
	DSI_REG(DSI0_STAT),
	DSI_REG(DSI0_HSTX_TO_CNT),
	DSI_REG(DSI0_LPRX_TO_CNT),
	DSI_REG(DSI0_TA_TO_CNT),
	DSI_REG(DSI0_PR_TO_CNT),
	DSI_REG(DSI0_DISP0_CTRL),
	DSI_REG(DSI0_DISP1_CTRL),
	DSI_REG(DSI0_INT_STAT),
	DSI_REG(DSI0_INT_EN),
	DSI_REG(DSI0_PHYC),
	DSI_REG(DSI0_HS_CLT0),
	DSI_REG(DSI0_HS_CLT1),
	DSI_REG(DSI0_HS_CLT2),
	DSI_REG(DSI0_HS_DLT3),
	DSI_REG(DSI0_HS_DLT4),
	DSI_REG(DSI0_HS_DLT5),
	DSI_REG(DSI0_HS_DLT6),
	DSI_REG(DSI0_HS_DLT7),
	DSI_REG(DSI0_PHY_AFEC0),
	DSI_REG(DSI0_PHY_AFEC1),
	DSI_REG(DSI0_ID),
};

static const struct {
	u32 reg;
	const char *name;
} dsi1_regs[] = {
	DSI_REG(DSI1_CTRL),
	DSI_REG(DSI1_STAT),
	DSI_REG(DSI1_HSTX_TO_CNT),
	DSI_REG(DSI1_LPRX_TO_CNT),
	DSI_REG(DSI1_TA_TO_CNT),
	DSI_REG(DSI1_PR_TO_CNT),
	DSI_REG(DSI1_DISP0_CTRL),
	DSI_REG(DSI1_DISP1_CTRL),
	DSI_REG(DSI1_INT_STAT),
	DSI_REG(DSI1_INT_EN),
	DSI_REG(DSI1_PHYC),
	DSI_REG(DSI1_HS_CLT0),
	DSI_REG(DSI1_HS_CLT1),
	DSI_REG(DSI1_HS_CLT2),
	DSI_REG(DSI1_HS_DLT3),
	DSI_REG(DSI1_HS_DLT4),
	DSI_REG(DSI1_HS_DLT5),
	DSI_REG(DSI1_HS_DLT6),
	DSI_REG(DSI1_HS_DLT7),
	DSI_REG(DSI1_PHY_AFEC0),
	DSI_REG(DSI1_PHY_AFEC1),
	DSI_REG(DSI1_ID),
};
675 | ||
676 | static void vc4_dsi_dump_regs(struct vc4_dsi *dsi) | |
677 | { | |
678 | int i; | |
679 | ||
680 | if (dsi->port == 0) { | |
681 | for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) { | |
682 | DRM_INFO("0x%04x (%s): 0x%08x\n", | |
683 | dsi0_regs[i].reg, dsi0_regs[i].name, | |
684 | DSI_READ(dsi0_regs[i].reg)); | |
685 | } | |
686 | } else { | |
687 | for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) { | |
688 | DRM_INFO("0x%04x (%s): 0x%08x\n", | |
689 | dsi1_regs[i].reg, dsi1_regs[i].name, | |
690 | DSI_READ(dsi1_regs[i].reg)); | |
691 | } | |
692 | } | |
693 | } | |
694 | ||
#ifdef CONFIG_DEBUG_FS
/* debugfs hook: print this port's register set into the seq_file.
 * Returns 0 even when the requested DSI instance isn't present.
 */
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *drm = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	int dsi_index = (uintptr_t)node->info_ent->data;
	/* Only a DSI1 instance is tracked in vc4_dev here. */
	struct vc4_dsi *dsi = (dsi_index == 1 ? vc4->dsi1 : NULL);
	int i;

	if (!dsi)
		return 0;

	if (dsi->port != 0) {
		for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
			seq_printf(m, "0x%04x (%s): 0x%08x\n",
				   dsi1_regs[i].reg, dsi1_regs[i].name,
				   DSI_READ(dsi1_regs[i].reg));
		}
	} else {
		for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
			seq_printf(m, "0x%04x (%s): 0x%08x\n",
				   dsi0_regs[i].reg, dsi0_regs[i].name,
				   DSI_READ(dsi0_regs[i].reg));
		}
	}

	return 0;
}
#endif
725 | ||
726 | static enum drm_connector_status | |
727 | vc4_dsi_connector_detect(struct drm_connector *connector, bool force) | |
728 | { | |
729 | struct vc4_dsi_connector *vc4_connector = | |
730 | to_vc4_dsi_connector(connector); | |
731 | struct vc4_dsi *dsi = vc4_connector->dsi; | |
732 | ||
733 | if (dsi->panel) | |
734 | return connector_status_connected; | |
735 | else | |
736 | return connector_status_disconnected; | |
737 | } | |
738 | ||
/* Tear down the connector: unregister from userspace first, then
 * release the DRM core's bookkeeping (the vc4_dsi_connector itself is
 * devm-allocated and freed with the device).
 */
static void vc4_dsi_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
744 | ||
745 | static int vc4_dsi_connector_get_modes(struct drm_connector *connector) | |
746 | { | |
747 | struct vc4_dsi_connector *vc4_connector = | |
748 | to_vc4_dsi_connector(connector); | |
749 | struct vc4_dsi *dsi = vc4_connector->dsi; | |
750 | ||
751 | if (dsi->panel) | |
752 | return drm_panel_get_modes(dsi->panel); | |
753 | ||
754 | return 0; | |
755 | } | |
756 | ||
/* Connector ops: standard atomic helpers plus our fixed-panel detect,
 * probe, and destroy callbacks.
 */
static const struct drm_connector_funcs vc4_dsi_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = vc4_dsi_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = vc4_dsi_connector_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs vc4_dsi_connector_helper_funcs = {
	.get_modes = vc4_dsi_connector_get_modes,
};
770 | ||
771 | static struct drm_connector *vc4_dsi_connector_init(struct drm_device *dev, | |
772 | struct vc4_dsi *dsi) | |
773 | { | |
fce6a7bc | 774 | struct drm_connector *connector; |
4078f575 | 775 | struct vc4_dsi_connector *dsi_connector; |
4078f575 EA |
776 | |
777 | dsi_connector = devm_kzalloc(dev->dev, sizeof(*dsi_connector), | |
778 | GFP_KERNEL); | |
fce6a7bc CIK |
779 | if (!dsi_connector) |
780 | return ERR_PTR(-ENOMEM); | |
781 | ||
4078f575 EA |
782 | connector = &dsi_connector->base; |
783 | ||
784 | dsi_connector->dsi = dsi; | |
785 | ||
786 | drm_connector_init(dev, connector, &vc4_dsi_connector_funcs, | |
787 | DRM_MODE_CONNECTOR_DSI); | |
788 | drm_connector_helper_add(connector, &vc4_dsi_connector_helper_funcs); | |
789 | ||
790 | connector->polled = 0; | |
791 | connector->interlace_allowed = 0; | |
792 | connector->doublescan_allowed = 0; | |
793 | ||
794 | drm_mode_connector_attach_encoder(connector, dsi->encoder); | |
795 | ||
796 | return connector; | |
4078f575 EA |
797 | } |
798 | ||
/* Encoder .destroy hook; nothing beyond core cleanup is needed since
 * the encoder memory itself is devm-managed.
 */
static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}
803 | ||
/* Encoder ops: only destruction; enable/disable live in the helper
 * funcs below.
 */
static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
	.destroy = vc4_dsi_encoder_destroy,
};
807 | ||
808 | static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch) | |
809 | { | |
810 | u32 afec0 = DSI_PORT_READ(PHY_AFEC0); | |
811 | ||
812 | if (latch) | |
813 | afec0 |= DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS); | |
814 | else | |
815 | afec0 &= ~DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS); | |
816 | ||
817 | DSI_PORT_WRITE(PHY_AFEC0, afec0); | |
818 | } | |
819 | ||
/* Enters or exits Ultra Low Power State.
 *
 * Sequence: request ULPS on all active lanes, wait for the STAT bits
 * to confirm entry, have the AFE latch the state (so the module can be
 * disabled, see comment below), then release the PHYC request bits and
 * wait for the lanes to report STOP.
 *
 * NOTE(review): 'continuous' is assigned from the NON_CONTINUOUS mode
 * flag, so the clock lane is included in the masks only for
 * non-continuous-clock panels — confirm this inversion is intended.
 */
static void vc4_dsi_ulps(struct vc4_dsi *dsi, bool ulps)
{
	bool continuous = dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS;
	/* Per-lane ULPS request bits for the active data lanes. */
	u32 phyc_ulps = ((continuous ? DSI_PORT_BIT(PHYC_CLANE_ULPS) : 0) |
			 DSI_PHYC_DLANE0_ULPS |
			 (dsi->lanes > 1 ? DSI_PHYC_DLANE1_ULPS : 0) |
			 (dsi->lanes > 2 ? DSI_PHYC_DLANE2_ULPS : 0) |
			 (dsi->lanes > 3 ? DSI_PHYC_DLANE3_ULPS : 0));
	/* Matching STAT bits that confirm ULPS entry. */
	u32 stat_ulps = ((continuous ? DSI1_STAT_PHY_CLOCK_ULPS : 0) |
			 DSI1_STAT_PHY_D0_ULPS |
			 (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_ULPS : 0) |
			 (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_ULPS : 0) |
			 (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_ULPS : 0));
	/* STAT bits that confirm the lanes reached the STOP state. */
	u32 stat_stop = ((continuous ? DSI1_STAT_PHY_CLOCK_STOP : 0) |
			 DSI1_STAT_PHY_D0_STOP |
			 (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_STOP : 0) |
			 (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) |
			 (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_STOP : 0));
	int ret;

	/* Writing STAT clears the (write-one-to-clear) bits we poll. */
	DSI_PORT_WRITE(STAT, stat_ulps);
	DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) | phyc_ulps);
	ret = wait_for((DSI_PORT_READ(STAT) & stat_ulps) == stat_ulps, 200);
	if (ret) {
		dev_warn(&dsi->pdev->dev,
			 "Timeout waiting for DSI ULPS entry: STAT 0x%08x",
			 DSI_PORT_READ(STAT));
		/* Back out: drop the requests and clear any latch. */
		DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
		vc4_dsi_latch_ulps(dsi, false);
		return;
	}

	/* The DSI module can't be disabled while the module is
	 * generating ULPS state.  So, to be able to disable the
	 * module, we have the AFE latch the ULPS state and continue
	 * on to having the module enter STOP.
	 */
	vc4_dsi_latch_ulps(dsi, ulps);

	DSI_PORT_WRITE(STAT, stat_stop);
	DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
	ret = wait_for((DSI_PORT_READ(STAT) & stat_stop) == stat_stop, 200);
	if (ret) {
		dev_warn(&dsi->pdev->dev,
			 "Timeout waiting for DSI STOP entry: STAT 0x%08x",
			 DSI_PORT_READ(STAT));
		DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
		return;
	}
}
871 | ||
872 | static u32 | |
873 | dsi_hs_timing(u32 ui_ns, u32 ns, u32 ui) | |
874 | { | |
875 | /* The HS timings have to be rounded up to a multiple of 8 | |
876 | * because we're using the byte clock. | |
877 | */ | |
878 | return roundup(ui + DIV_ROUND_UP(ns, ui_ns), 8); | |
879 | } | |
880 | ||
/* ESC always runs at 100Mhz. */
#define ESC_TIME_NS 10

/* Converts a duration in nanoseconds to a rounded-up count of escape
 * clock cycles.
 */
static u32
dsi_esc_timing(u32 ns)
{
	return DIV_ROUND_UP(ns, ESC_TIME_NS);
}
889 | ||
/*
 * vc4_dsi_encoder_disable - stops the pixel stream and powers the
 * link down.
 *
 * Mirrors vc4_dsi_encoder_enable() in reverse: disable the panel, put
 * the lanes into (latched) ULPS so the module can be stopped,
 * unprepare the panel, gate the clocks, then drop the runtime-PM
 * reference taken in enable.
 */
static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
{
	struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
	struct vc4_dsi *dsi = vc4_encoder->dsi;
	struct device *dev = &dsi->pdev->dev;

	drm_panel_disable(dsi->panel);

	vc4_dsi_ulps(dsi, true);

	drm_panel_unprepare(dsi->panel);

	clk_disable_unprepare(dsi->pll_phy_clock);
	clk_disable_unprepare(dsi->escape_clock);
	clk_disable_unprepare(dsi->pixel_clock);

	pm_runtime_put(dev);
}
908 | ||
909 | static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) | |
910 | { | |
911 | struct drm_display_mode *mode = &encoder->crtc->mode; | |
912 | struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder); | |
913 | struct vc4_dsi *dsi = vc4_encoder->dsi; | |
914 | struct device *dev = &dsi->pdev->dev; | |
915 | u32 format = 0, divider = 0; | |
916 | bool debug_dump_regs = false; | |
917 | unsigned long hs_clock; | |
918 | u32 ui_ns; | |
919 | /* Minimum LP state duration in escape clock cycles. */ | |
920 | u32 lpx = dsi_esc_timing(60); | |
921 | unsigned long pixel_clock_hz = mode->clock * 1000; | |
922 | unsigned long dsip_clock; | |
923 | unsigned long phy_clock; | |
924 | int ret; | |
925 | ||
926 | ret = pm_runtime_get_sync(dev); | |
927 | if (ret) { | |
928 | DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port); | |
929 | return; | |
930 | } | |
931 | ||
932 | ret = drm_panel_prepare(dsi->panel); | |
933 | if (ret) { | |
934 | DRM_ERROR("Panel failed to prepare\n"); | |
935 | return; | |
936 | } | |
937 | ||
938 | if (debug_dump_regs) { | |
939 | DRM_INFO("DSI regs before:\n"); | |
940 | vc4_dsi_dump_regs(dsi); | |
941 | } | |
942 | ||
943 | switch (dsi->format) { | |
944 | case MIPI_DSI_FMT_RGB888: | |
945 | format = DSI_PFORMAT_RGB888; | |
946 | divider = 24 / dsi->lanes; | |
947 | break; | |
948 | case MIPI_DSI_FMT_RGB666: | |
949 | format = DSI_PFORMAT_RGB666; | |
950 | divider = 24 / dsi->lanes; | |
951 | break; | |
952 | case MIPI_DSI_FMT_RGB666_PACKED: | |
953 | format = DSI_PFORMAT_RGB666_PACKED; | |
954 | divider = 18 / dsi->lanes; | |
955 | break; | |
956 | case MIPI_DSI_FMT_RGB565: | |
957 | format = DSI_PFORMAT_RGB565; | |
958 | divider = 16 / dsi->lanes; | |
959 | break; | |
960 | } | |
961 | ||
962 | phy_clock = pixel_clock_hz * divider; | |
963 | ret = clk_set_rate(dsi->pll_phy_clock, phy_clock); | |
964 | if (ret) { | |
965 | dev_err(&dsi->pdev->dev, | |
966 | "Failed to set phy clock to %ld: %d\n", phy_clock, ret); | |
967 | } | |
968 | ||
969 | /* Reset the DSI and all its fifos. */ | |
970 | DSI_PORT_WRITE(CTRL, | |
971 | DSI_CTRL_SOFT_RESET_CFG | | |
972 | DSI_PORT_BIT(CTRL_RESET_FIFOS)); | |
973 | ||
974 | DSI_PORT_WRITE(CTRL, | |
975 | DSI_CTRL_HSDT_EOT_DISABLE | | |
976 | DSI_CTRL_RX_LPDT_EOT_DISABLE); | |
977 | ||
978 | /* Clear all stat bits so we see what has happened during enable. */ | |
979 | DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT)); | |
980 | ||
981 | /* Set AFE CTR00/CTR1 to release powerdown of analog. */ | |
982 | if (dsi->port == 0) { | |
983 | u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) | | |
984 | VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ)); | |
985 | ||
986 | if (dsi->lanes < 2) | |
987 | afec0 |= DSI0_PHY_AFEC0_PD_DLANE1; | |
988 | ||
989 | if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) | |
990 | afec0 |= DSI0_PHY_AFEC0_RESET; | |
991 | ||
992 | DSI_PORT_WRITE(PHY_AFEC0, afec0); | |
993 | ||
994 | DSI_PORT_WRITE(PHY_AFEC1, | |
995 | VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE1) | | |
996 | VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE0) | | |
997 | VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_CLANE)); | |
998 | } else { | |
999 | u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) | | |
1000 | VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ) | | |
1001 | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_CLANE) | | |
1002 | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE0) | | |
1003 | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE1) | | |
1004 | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE2) | | |
1005 | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE3)); | |
1006 | ||
1007 | if (dsi->lanes < 4) | |
1008 | afec0 |= DSI1_PHY_AFEC0_PD_DLANE3; | |
1009 | if (dsi->lanes < 3) | |
1010 | afec0 |= DSI1_PHY_AFEC0_PD_DLANE2; | |
1011 | if (dsi->lanes < 2) | |
1012 | afec0 |= DSI1_PHY_AFEC0_PD_DLANE1; | |
1013 | ||
1014 | afec0 |= DSI1_PHY_AFEC0_RESET; | |
1015 | ||
1016 | DSI_PORT_WRITE(PHY_AFEC0, afec0); | |
1017 | ||
1018 | DSI_PORT_WRITE(PHY_AFEC1, 0); | |
1019 | ||
1020 | /* AFEC reset hold time */ | |
1021 | mdelay(1); | |
1022 | } | |
1023 | ||
1024 | ret = clk_prepare_enable(dsi->escape_clock); | |
1025 | if (ret) { | |
1026 | DRM_ERROR("Failed to turn on DSI escape clock: %d\n", ret); | |
1027 | return; | |
1028 | } | |
1029 | ||
1030 | ret = clk_prepare_enable(dsi->pll_phy_clock); | |
1031 | if (ret) { | |
1032 | DRM_ERROR("Failed to turn on DSI PLL: %d\n", ret); | |
1033 | return; | |
1034 | } | |
1035 | ||
1036 | hs_clock = clk_get_rate(dsi->pll_phy_clock); | |
1037 | ||
1038 | /* Yes, we set the DSI0P/DSI1P pixel clock to the byte rate, | |
1039 | * not the pixel clock rate. DSIxP take from the APHY's byte, | |
1040 | * DDR2, or DDR4 clock (we use byte) and feed into the PV at | |
1041 | * that rate. Separately, a value derived from PIX_CLK_DIV | |
1042 | * and HS_CLKC is fed into the PV to divide down to the actual | |
1043 | * pixel clock for pushing pixels into DSI. | |
1044 | */ | |
1045 | dsip_clock = phy_clock / 8; | |
1046 | ret = clk_set_rate(dsi->pixel_clock, dsip_clock); | |
1047 | if (ret) { | |
1048 | dev_err(dev, "Failed to set pixel clock to %ldHz: %d\n", | |
1049 | dsip_clock, ret); | |
1050 | } | |
1051 | ||
1052 | ret = clk_prepare_enable(dsi->pixel_clock); | |
1053 | if (ret) { | |
1054 | DRM_ERROR("Failed to turn on DSI pixel clock: %d\n", ret); | |
1055 | return; | |
1056 | } | |
1057 | ||
1058 | /* How many ns one DSI unit interval is. Note that the clock | |
1059 | * is DDR, so there's an extra divide by 2. | |
1060 | */ | |
1061 | ui_ns = DIV_ROUND_UP(500000000, hs_clock); | |
1062 | ||
1063 | DSI_PORT_WRITE(HS_CLT0, | |
1064 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 262, 0), | |
1065 | DSI_HS_CLT0_CZERO) | | |
1066 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 0, 8), | |
1067 | DSI_HS_CLT0_CPRE) | | |
1068 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 38, 0), | |
1069 | DSI_HS_CLT0_CPREP)); | |
1070 | ||
1071 | DSI_PORT_WRITE(HS_CLT1, | |
1072 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 60, 0), | |
1073 | DSI_HS_CLT1_CTRAIL) | | |
1074 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 60, 52), | |
1075 | DSI_HS_CLT1_CPOST)); | |
1076 | ||
1077 | DSI_PORT_WRITE(HS_CLT2, | |
1078 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 1000000, 0), | |
1079 | DSI_HS_CLT2_WUP)); | |
1080 | ||
1081 | DSI_PORT_WRITE(HS_DLT3, | |
1082 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 100, 0), | |
1083 | DSI_HS_DLT3_EXIT) | | |
1084 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 105, 6), | |
1085 | DSI_HS_DLT3_ZERO) | | |
1086 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 40, 4), | |
1087 | DSI_HS_DLT3_PRE)); | |
1088 | ||
1089 | DSI_PORT_WRITE(HS_DLT4, | |
1090 | VC4_SET_FIELD(dsi_hs_timing(ui_ns, lpx * ESC_TIME_NS, 0), | |
1091 | DSI_HS_DLT4_LPX) | | |
1092 | VC4_SET_FIELD(max(dsi_hs_timing(ui_ns, 0, 8), | |
1093 | dsi_hs_timing(ui_ns, 60, 4)), | |
1094 | DSI_HS_DLT4_TRAIL) | | |
1095 | VC4_SET_FIELD(0, DSI_HS_DLT4_ANLAT)); | |
1096 | ||
1097 | DSI_PORT_WRITE(HS_DLT5, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 1000, 5000), | |
1098 | DSI_HS_DLT5_INIT)); | |
1099 | ||
1100 | DSI_PORT_WRITE(HS_DLT6, | |
1101 | VC4_SET_FIELD(lpx * 5, DSI_HS_DLT6_TA_GET) | | |
1102 | VC4_SET_FIELD(lpx, DSI_HS_DLT6_TA_SURE) | | |
1103 | VC4_SET_FIELD(lpx * 4, DSI_HS_DLT6_TA_GO) | | |
1104 | VC4_SET_FIELD(lpx, DSI_HS_DLT6_LP_LPX)); | |
1105 | ||
1106 | DSI_PORT_WRITE(HS_DLT7, | |
1107 | VC4_SET_FIELD(dsi_esc_timing(1000000), | |
1108 | DSI_HS_DLT7_LP_WUP)); | |
1109 | ||
1110 | DSI_PORT_WRITE(PHYC, | |
1111 | DSI_PHYC_DLANE0_ENABLE | | |
1112 | (dsi->lanes >= 2 ? DSI_PHYC_DLANE1_ENABLE : 0) | | |
1113 | (dsi->lanes >= 3 ? DSI_PHYC_DLANE2_ENABLE : 0) | | |
1114 | (dsi->lanes >= 4 ? DSI_PHYC_DLANE3_ENABLE : 0) | | |
1115 | DSI_PORT_BIT(PHYC_CLANE_ENABLE) | | |
1116 | ((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? | |
1117 | 0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) | | |
1118 | (dsi->port == 0 ? | |
1119 | VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) : | |
1120 | VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT))); | |
1121 | ||
1122 | DSI_PORT_WRITE(CTRL, | |
1123 | DSI_PORT_READ(CTRL) | | |
1124 | DSI_CTRL_CAL_BYTE); | |
1125 | ||
1126 | /* HS timeout in HS clock cycles: disabled. */ | |
1127 | DSI_PORT_WRITE(HSTX_TO_CNT, 0); | |
1128 | /* LP receive timeout in HS clocks. */ | |
1129 | DSI_PORT_WRITE(LPRX_TO_CNT, 0xffffff); | |
1130 | /* Bus turnaround timeout */ | |
1131 | DSI_PORT_WRITE(TA_TO_CNT, 100000); | |
1132 | /* Display reset sequence timeout */ | |
1133 | DSI_PORT_WRITE(PR_TO_CNT, 100000); | |
1134 | ||
1135 | if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { | |
1136 | DSI_PORT_WRITE(DISP0_CTRL, | |
1137 | VC4_SET_FIELD(divider, DSI_DISP0_PIX_CLK_DIV) | | |
1138 | VC4_SET_FIELD(format, DSI_DISP0_PFORMAT) | | |
1139 | VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME, | |
1140 | DSI_DISP0_LP_STOP_CTRL) | | |
1141 | DSI_DISP0_ST_END | | |
1142 | DSI_DISP0_ENABLE); | |
1143 | } else { | |
1144 | DSI_PORT_WRITE(DISP0_CTRL, | |
1145 | DSI_DISP0_COMMAND_MODE | | |
1146 | DSI_DISP0_ENABLE); | |
1147 | } | |
1148 | ||
1149 | /* Set up DISP1 for transferring long command payloads through | |
1150 | * the pixfifo. | |
1151 | */ | |
1152 | DSI_PORT_WRITE(DISP1_CTRL, | |
1153 | VC4_SET_FIELD(DSI_DISP1_PFORMAT_32BIT_LE, | |
1154 | DSI_DISP1_PFORMAT) | | |
1155 | DSI_DISP1_ENABLE); | |
1156 | ||
1157 | /* Ungate the block. */ | |
1158 | if (dsi->port == 0) | |
1159 | DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0); | |
1160 | else | |
1161 | DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN); | |
1162 | ||
1163 | /* Bring AFE out of reset. */ | |
1164 | if (dsi->port == 0) { | |
1165 | } else { | |
1166 | DSI_PORT_WRITE(PHY_AFEC0, | |
1167 | DSI_PORT_READ(PHY_AFEC0) & | |
1168 | ~DSI1_PHY_AFEC0_RESET); | |
1169 | } | |
1170 | ||
1171 | vc4_dsi_ulps(dsi, false); | |
1172 | ||
1173 | if (debug_dump_regs) { | |
1174 | DRM_INFO("DSI regs after:\n"); | |
1175 | vc4_dsi_dump_regs(dsi); | |
1176 | } | |
1177 | ||
1178 | ret = drm_panel_enable(dsi->panel); | |
1179 | if (ret) { | |
1180 | DRM_ERROR("Panel failed to enable\n"); | |
1181 | drm_panel_unprepare(dsi->panel); | |
1182 | return; | |
1183 | } | |
1184 | } | |
1185 | ||
1186 | static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host, | |
1187 | const struct mipi_dsi_msg *msg) | |
1188 | { | |
1189 | struct vc4_dsi *dsi = host_to_dsi(host); | |
1190 | struct mipi_dsi_packet packet; | |
1191 | u32 pkth = 0, pktc = 0; | |
1192 | int i, ret; | |
1193 | bool is_long = mipi_dsi_packet_format_is_long(msg->type); | |
1194 | u32 cmd_fifo_len = 0, pix_fifo_len = 0; | |
1195 | ||
1196 | mipi_dsi_create_packet(&packet, msg); | |
1197 | ||
1198 | pkth |= VC4_SET_FIELD(packet.header[0], DSI_TXPKT1H_BC_DT); | |
1199 | pkth |= VC4_SET_FIELD(packet.header[1] | | |
1200 | (packet.header[2] << 8), | |
1201 | DSI_TXPKT1H_BC_PARAM); | |
1202 | if (is_long) { | |
1203 | /* Divide data across the various FIFOs we have available. | |
1204 | * The command FIFO takes byte-oriented data, but is of | |
1205 | * limited size. The pixel FIFO (never actually used for | |
1206 | * pixel data in reality) is word oriented, and substantially | |
1207 | * larger. So, we use the pixel FIFO for most of the data, | |
1208 | * sending the residual bytes in the command FIFO at the start. | |
1209 | * | |
1210 | * With this arrangement, the command FIFO will never get full. | |
1211 | */ | |
1212 | if (packet.payload_length <= 16) { | |
1213 | cmd_fifo_len = packet.payload_length; | |
1214 | pix_fifo_len = 0; | |
1215 | } else { | |
1216 | cmd_fifo_len = (packet.payload_length % | |
1217 | DSI_PIX_FIFO_WIDTH); | |
1218 | pix_fifo_len = ((packet.payload_length - cmd_fifo_len) / | |
1219 | DSI_PIX_FIFO_WIDTH); | |
1220 | } | |
1221 | ||
1222 | WARN_ON_ONCE(pix_fifo_len >= DSI_PIX_FIFO_DEPTH); | |
1223 | ||
1224 | pkth |= VC4_SET_FIELD(cmd_fifo_len, DSI_TXPKT1H_BC_CMDFIFO); | |
1225 | } | |
1226 | ||
1227 | if (msg->rx_len) { | |
1228 | pktc |= VC4_SET_FIELD(DSI_TXPKT1C_CMD_CTRL_RX, | |
1229 | DSI_TXPKT1C_CMD_CTRL); | |
1230 | } else { | |
1231 | pktc |= VC4_SET_FIELD(DSI_TXPKT1C_CMD_CTRL_TX, | |
1232 | DSI_TXPKT1C_CMD_CTRL); | |
1233 | } | |
1234 | ||
1235 | for (i = 0; i < cmd_fifo_len; i++) | |
1236 | DSI_PORT_WRITE(TXPKT_CMD_FIFO, packet.payload[i]); | |
1237 | for (i = 0; i < pix_fifo_len; i++) { | |
1238 | const u8 *pix = packet.payload + cmd_fifo_len + i * 4; | |
1239 | ||
1240 | DSI_PORT_WRITE(TXPKT_PIX_FIFO, | |
1241 | pix[0] | | |
1242 | pix[1] << 8 | | |
1243 | pix[2] << 16 | | |
1244 | pix[3] << 24); | |
1245 | } | |
1246 | ||
1247 | if (msg->flags & MIPI_DSI_MSG_USE_LPM) | |
1248 | pktc |= DSI_TXPKT1C_CMD_MODE_LP; | |
1249 | if (is_long) | |
1250 | pktc |= DSI_TXPKT1C_CMD_TYPE_LONG; | |
1251 | ||
1252 | /* Send one copy of the packet. Larger repeats are used for pixel | |
1253 | * data in command mode. | |
1254 | */ | |
1255 | pktc |= VC4_SET_FIELD(1, DSI_TXPKT1C_CMD_REPEAT); | |
1256 | ||
1257 | pktc |= DSI_TXPKT1C_CMD_EN; | |
1258 | if (pix_fifo_len) { | |
1259 | pktc |= VC4_SET_FIELD(DSI_TXPKT1C_DISPLAY_NO_SECONDARY, | |
1260 | DSI_TXPKT1C_DISPLAY_NO); | |
1261 | } else { | |
1262 | pktc |= VC4_SET_FIELD(DSI_TXPKT1C_DISPLAY_NO_SHORT, | |
1263 | DSI_TXPKT1C_DISPLAY_NO); | |
1264 | } | |
1265 | ||
1266 | /* Enable the appropriate interrupt for the transfer completion. */ | |
1267 | dsi->xfer_result = 0; | |
1268 | reinit_completion(&dsi->xfer_completion); | |
1269 | DSI_PORT_WRITE(INT_STAT, DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF); | |
1270 | if (msg->rx_len) { | |
1271 | DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED | | |
1272 | DSI1_INT_PHY_DIR_RTF)); | |
1273 | } else { | |
1274 | DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED | | |
1275 | DSI1_INT_TXPKT1_DONE)); | |
1276 | } | |
1277 | ||
1278 | /* Send the packet. */ | |
1279 | DSI_PORT_WRITE(TXPKT1H, pkth); | |
1280 | DSI_PORT_WRITE(TXPKT1C, pktc); | |
1281 | ||
1282 | if (!wait_for_completion_timeout(&dsi->xfer_completion, | |
1283 | msecs_to_jiffies(1000))) { | |
1284 | dev_err(&dsi->pdev->dev, "transfer interrupt wait timeout"); | |
1285 | dev_err(&dsi->pdev->dev, "instat: 0x%08x\n", | |
1286 | DSI_PORT_READ(INT_STAT)); | |
1287 | ret = -ETIMEDOUT; | |
1288 | } else { | |
1289 | ret = dsi->xfer_result; | |
1290 | } | |
1291 | ||
1292 | DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED); | |
1293 | ||
1294 | if (ret) | |
1295 | goto reset_fifo_and_return; | |
1296 | ||
1297 | if (ret == 0 && msg->rx_len) { | |
1298 | u32 rxpkt1h = DSI_PORT_READ(RXPKT1H); | |
1299 | u8 *msg_rx = msg->rx_buf; | |
1300 | ||
1301 | if (rxpkt1h & DSI_RXPKT1H_PKT_TYPE_LONG) { | |
1302 | u32 rxlen = VC4_GET_FIELD(rxpkt1h, | |
1303 | DSI_RXPKT1H_BC_PARAM); | |
1304 | ||
1305 | if (rxlen != msg->rx_len) { | |
1306 | DRM_ERROR("DSI returned %db, expecting %db\n", | |
1307 | rxlen, (int)msg->rx_len); | |
1308 | ret = -ENXIO; | |
1309 | goto reset_fifo_and_return; | |
1310 | } | |
1311 | ||
1312 | for (i = 0; i < msg->rx_len; i++) | |
1313 | msg_rx[i] = DSI_READ(DSI1_RXPKT_FIFO); | |
1314 | } else { | |
1315 | /* FINISHME: Handle AWER */ | |
1316 | ||
1317 | msg_rx[0] = VC4_GET_FIELD(rxpkt1h, | |
1318 | DSI_RXPKT1H_SHORT_0); | |
1319 | if (msg->rx_len > 1) { | |
1320 | msg_rx[1] = VC4_GET_FIELD(rxpkt1h, | |
1321 | DSI_RXPKT1H_SHORT_1); | |
1322 | } | |
1323 | } | |
1324 | } | |
1325 | ||
1326 | return ret; | |
1327 | ||
1328 | reset_fifo_and_return: | |
1329 | DRM_ERROR("DSI transfer failed, resetting: %d\n", ret); | |
1330 | ||
1331 | DSI_PORT_WRITE(TXPKT1C, DSI_PORT_READ(TXPKT1C) & ~DSI_TXPKT1C_CMD_EN); | |
1332 | udelay(1); | |
1333 | DSI_PORT_WRITE(CTRL, | |
1334 | DSI_PORT_READ(CTRL) | | |
1335 | DSI_PORT_BIT(CTRL_RESET_FIFOS)); | |
1336 | ||
1337 | DSI_PORT_WRITE(TXPKT1C, 0); | |
1338 | DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED); | |
1339 | return ret; | |
1340 | } | |
1341 | ||
1342 | static int vc4_dsi_host_attach(struct mipi_dsi_host *host, | |
1343 | struct mipi_dsi_device *device) | |
1344 | { | |
1345 | struct vc4_dsi *dsi = host_to_dsi(host); | |
1346 | int ret = 0; | |
1347 | ||
1348 | dsi->lanes = device->lanes; | |
1349 | dsi->channel = device->channel; | |
1350 | dsi->format = device->format; | |
1351 | dsi->mode_flags = device->mode_flags; | |
1352 | ||
1353 | if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) { | |
1354 | dev_err(&dsi->pdev->dev, | |
1355 | "Only VIDEO mode panels supported currently.\n"); | |
1356 | return 0; | |
1357 | } | |
1358 | ||
1359 | dsi->panel = of_drm_find_panel(device->dev.of_node); | |
1360 | if (!dsi->panel) | |
1361 | return 0; | |
1362 | ||
1363 | ret = drm_panel_attach(dsi->panel, dsi->connector); | |
1364 | if (ret != 0) | |
1365 | return ret; | |
1366 | ||
1367 | drm_helper_hpd_irq_event(dsi->connector->dev); | |
1368 | ||
1369 | return 0; | |
1370 | } | |
1371 | ||
1372 | static int vc4_dsi_host_detach(struct mipi_dsi_host *host, | |
1373 | struct mipi_dsi_device *device) | |
1374 | { | |
1375 | struct vc4_dsi *dsi = host_to_dsi(host); | |
1376 | ||
1377 | if (dsi->panel) { | |
1378 | int ret = drm_panel_detach(dsi->panel); | |
1379 | ||
1380 | if (ret) | |
1381 | return ret; | |
1382 | ||
1383 | dsi->panel = NULL; | |
1384 | ||
1385 | drm_helper_hpd_irq_event(dsi->connector->dev); | |
1386 | } | |
1387 | ||
1388 | return 0; | |
1389 | } | |
1390 | ||
/* mipi_dsi host callbacks: peripheral attach/detach and raw packet
 * transfer.
 */
static const struct mipi_dsi_host_ops vc4_dsi_host_ops = {
	.attach = vc4_dsi_host_attach,
	.detach = vc4_dsi_host_detach,
	.transfer = vc4_dsi_host_transfer,
};
1396 | ||
/* Encoder helper ops: power the DSI link up/down with the CRTC. */
static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
	.disable = vc4_dsi_encoder_disable,
	.enable = vc4_dsi_encoder_enable,
};
1401 | ||
/* The match data is the DSI port number (only DSI1 is supported so
 * far; see the DOC comment at the top of the file).
 */
static const struct of_device_id vc4_dsi_dt_match[] = {
	{ .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 },
	{}
};
1406 | ||
1407 | static void dsi_handle_error(struct vc4_dsi *dsi, | |
1408 | irqreturn_t *ret, u32 stat, u32 bit, | |
1409 | const char *type) | |
1410 | { | |
1411 | if (!(stat & bit)) | |
1412 | return; | |
1413 | ||
1414 | DRM_ERROR("DSI%d: %s error\n", dsi->port, type); | |
1415 | *ret = IRQ_HANDLED; | |
1416 | } | |
1417 | ||
1418 | static irqreturn_t vc4_dsi_irq_handler(int irq, void *data) | |
1419 | { | |
1420 | struct vc4_dsi *dsi = data; | |
1421 | u32 stat = DSI_PORT_READ(INT_STAT); | |
1422 | irqreturn_t ret = IRQ_NONE; | |
1423 | ||
1424 | DSI_PORT_WRITE(INT_STAT, stat); | |
1425 | ||
1426 | dsi_handle_error(dsi, &ret, stat, | |
1427 | DSI1_INT_ERR_SYNC_ESC, "LPDT sync"); | |
1428 | dsi_handle_error(dsi, &ret, stat, | |
1429 | DSI1_INT_ERR_CONTROL, "data lane 0 sequence"); | |
1430 | dsi_handle_error(dsi, &ret, stat, | |
1431 | DSI1_INT_ERR_CONT_LP0, "LP0 contention"); | |
1432 | dsi_handle_error(dsi, &ret, stat, | |
1433 | DSI1_INT_ERR_CONT_LP1, "LP1 contention"); | |
1434 | dsi_handle_error(dsi, &ret, stat, | |
1435 | DSI1_INT_HSTX_TO, "HSTX timeout"); | |
1436 | dsi_handle_error(dsi, &ret, stat, | |
1437 | DSI1_INT_LPRX_TO, "LPRX timeout"); | |
1438 | dsi_handle_error(dsi, &ret, stat, | |
1439 | DSI1_INT_TA_TO, "turnaround timeout"); | |
1440 | dsi_handle_error(dsi, &ret, stat, | |
1441 | DSI1_INT_PR_TO, "peripheral reset timeout"); | |
1442 | ||
1443 | if (stat & (DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF)) { | |
1444 | complete(&dsi->xfer_completion); | |
1445 | ret = IRQ_HANDLED; | |
1446 | } else if (stat & DSI1_INT_HSTX_TO) { | |
1447 | complete(&dsi->xfer_completion); | |
1448 | dsi->xfer_result = -ETIMEDOUT; | |
1449 | ret = IRQ_HANDLED; | |
1450 | } | |
1451 | ||
1452 | return ret; | |
1453 | } | |
1454 | ||
/**
 * vc4_dsi_init_phy_clocks - Exposes clocks generated by the analog
 * PHY that are consumed by CPRMAN (clk-bcm2835.c).
 * @dsi: DSI encoder
 *
 * Registers three fixed-factor children of the DSI PLL (byte = /8,
 * DDR2 = /4, DDR = /2) and publishes them through a onecell clock
 * provider on this device's OF node.
 *
 * Returns 0 on success or a negative errno.
 */
static int
vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
{
	struct device *dev = &dsi->pdev->dev;
	const char *parent_name = __clk_get_name(dsi->pll_phy_clock);
	static const struct {
		const char *dsi0_name, *dsi1_name;
		int div;
	} phy_clocks[] = {
		{ "dsi0_byte", "dsi1_byte", 8 },
		{ "dsi0_ddr2", "dsi1_ddr2", 4 },
		{ "dsi0_ddr", "dsi1_ddr", 2 },
	};
	int i;

	/* clk_hw_onecell_data has a flexible hws[] array, hence the
	 * manual size computation.
	 */
	dsi->clk_onecell = devm_kzalloc(dev,
					sizeof(*dsi->clk_onecell) +
					ARRAY_SIZE(phy_clocks) *
					sizeof(struct clk_hw *),
					GFP_KERNEL);
	if (!dsi->clk_onecell)
		return -ENOMEM;
	dsi->clk_onecell->num = ARRAY_SIZE(phy_clocks);

	for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) {
		struct clk_fixed_factor *fix = &dsi->phy_clocks[i];
		/* Only read during devm_clk_hw_register() below, so a
		 * stack-local struct is fine here.
		 */
		struct clk_init_data init;
		int ret;

		/* We just use core fixed factor clock ops for the PHY
		 * clocks.  The clocks are actually gated by the
		 * PHY_AFEC0_DDRCLK_EN bits, which we should be
		 * setting if we use the DDR/DDR2 clocks.  However,
		 * vc4_dsi_encoder_enable() is setting up both AFEC0,
		 * setting both our parent DSI PLL's rate and this
		 * clock's rate, so it knows if DDR/DDR2 are going to
		 * be used and could enable the gates itself.
		 */
		fix->mult = 1;
		fix->div = phy_clocks[i].div;
		fix->hw.init = &init;

		memset(&init, 0, sizeof(init));
		init.parent_names = &parent_name;
		init.num_parents = 1;
		if (dsi->port == 1)
			init.name = phy_clocks[i].dsi1_name;
		else
			init.name = phy_clocks[i].dsi0_name;
		init.ops = &clk_fixed_factor_ops;

		ret = devm_clk_hw_register(dev, &fix->hw);
		if (ret)
			return ret;

		dsi->clk_onecell->hws[i] = &fix->hw;
	}

	return of_clk_add_hw_provider(dev->of_node,
				      of_clk_hw_onecell_get,
				      dsi->clk_onecell);
}
1522 | ||
1523 | static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) | |
1524 | { | |
1525 | struct platform_device *pdev = to_platform_device(dev); | |
1526 | struct drm_device *drm = dev_get_drvdata(master); | |
1527 | struct vc4_dev *vc4 = to_vc4_dev(drm); | |
1528 | struct vc4_dsi *dsi; | |
1529 | struct vc4_dsi_encoder *vc4_dsi_encoder; | |
1530 | const struct of_device_id *match; | |
1531 | dma_cap_mask_t dma_mask; | |
1532 | int ret; | |
1533 | ||
1534 | dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); | |
1535 | if (!dsi) | |
1536 | return -ENOMEM; | |
1537 | ||
1538 | match = of_match_device(vc4_dsi_dt_match, dev); | |
1539 | if (!match) | |
1540 | return -ENODEV; | |
1541 | ||
1542 | dsi->port = (uintptr_t)match->data; | |
1543 | ||
1544 | vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder), | |
1545 | GFP_KERNEL); | |
1546 | if (!vc4_dsi_encoder) | |
1547 | return -ENOMEM; | |
1548 | vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1; | |
1549 | vc4_dsi_encoder->dsi = dsi; | |
1550 | dsi->encoder = &vc4_dsi_encoder->base.base; | |
1551 | ||
1552 | dsi->pdev = pdev; | |
1553 | dsi->regs = vc4_ioremap_regs(pdev, 0); | |
1554 | if (IS_ERR(dsi->regs)) | |
1555 | return PTR_ERR(dsi->regs); | |
1556 | ||
1557 | if (DSI_PORT_READ(ID) != DSI_ID_VALUE) { | |
1558 | dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n", | |
1559 | DSI_PORT_READ(ID), DSI_ID_VALUE); | |
1560 | return -ENODEV; | |
1561 | } | |
1562 | ||
1563 | /* DSI1 has a broken AXI slave that doesn't respond to writes | |
1564 | * from the ARM. It does handle writes from the DMA engine, | |
1565 | * so set up a channel for talking to it. | |
1566 | */ | |
1567 | if (dsi->port == 1) { | |
1568 | dsi->reg_dma_mem = dma_alloc_coherent(dev, 4, | |
1569 | &dsi->reg_dma_paddr, | |
1570 | GFP_KERNEL); | |
1571 | if (!dsi->reg_dma_mem) { | |
1572 | DRM_ERROR("Failed to get DMA memory\n"); | |
1573 | return -ENOMEM; | |
1574 | } | |
1575 | ||
1576 | dma_cap_zero(dma_mask); | |
1577 | dma_cap_set(DMA_MEMCPY, dma_mask); | |
1578 | dsi->reg_dma_chan = dma_request_chan_by_mask(&dma_mask); | |
1579 | if (IS_ERR(dsi->reg_dma_chan)) { | |
1580 | ret = PTR_ERR(dsi->reg_dma_chan); | |
1581 | if (ret != -EPROBE_DEFER) | |
1582 | DRM_ERROR("Failed to get DMA channel: %d\n", | |
1583 | ret); | |
1584 | return ret; | |
1585 | } | |
1586 | ||
1587 | /* Get the physical address of the device's registers. The | |
1588 | * struct resource for the regs gives us the bus address | |
1589 | * instead. | |
1590 | */ | |
1591 | dsi->reg_paddr = be32_to_cpup(of_get_address(dev->of_node, | |
1592 | 0, NULL, NULL)); | |
1593 | } | |
1594 | ||
1595 | init_completion(&dsi->xfer_completion); | |
1596 | /* At startup enable error-reporting interrupts and nothing else. */ | |
1597 | DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED); | |
1598 | /* Clear any existing interrupt state. */ | |
1599 | DSI_PORT_WRITE(INT_STAT, DSI_PORT_READ(INT_STAT)); | |
1600 | ||
1601 | ret = devm_request_irq(dev, platform_get_irq(pdev, 0), | |
1602 | vc4_dsi_irq_handler, 0, "vc4 dsi", dsi); | |
1603 | if (ret) { | |
1604 | if (ret != -EPROBE_DEFER) | |
1605 | dev_err(dev, "Failed to get interrupt: %d\n", ret); | |
1606 | return ret; | |
1607 | } | |
1608 | ||
1609 | dsi->escape_clock = devm_clk_get(dev, "escape"); | |
1610 | if (IS_ERR(dsi->escape_clock)) { | |
1611 | ret = PTR_ERR(dsi->escape_clock); | |
1612 | if (ret != -EPROBE_DEFER) | |
1613 | dev_err(dev, "Failed to get escape clock: %d\n", ret); | |
1614 | return ret; | |
1615 | } | |
1616 | ||
1617 | dsi->pll_phy_clock = devm_clk_get(dev, "phy"); | |
1618 | if (IS_ERR(dsi->pll_phy_clock)) { | |
1619 | ret = PTR_ERR(dsi->pll_phy_clock); | |
1620 | if (ret != -EPROBE_DEFER) | |
1621 | dev_err(dev, "Failed to get phy clock: %d\n", ret); | |
1622 | return ret; | |
1623 | } | |
1624 | ||
1625 | dsi->pixel_clock = devm_clk_get(dev, "pixel"); | |
1626 | if (IS_ERR(dsi->pixel_clock)) { | |
1627 | ret = PTR_ERR(dsi->pixel_clock); | |
1628 | if (ret != -EPROBE_DEFER) | |
1629 | dev_err(dev, "Failed to get pixel clock: %d\n", ret); | |
1630 | return ret; | |
1631 | } | |
1632 | ||
1633 | /* The esc clock rate is supposed to always be 100Mhz. */ | |
1634 | ret = clk_set_rate(dsi->escape_clock, 100 * 1000000); | |
1635 | if (ret) { | |
1636 | dev_err(dev, "Failed to set esc clock: %d\n", ret); | |
1637 | return ret; | |
1638 | } | |
1639 | ||
1640 | ret = vc4_dsi_init_phy_clocks(dsi); | |
1641 | if (ret) | |
1642 | return ret; | |
1643 | ||
1644 | if (dsi->port == 1) | |
1645 | vc4->dsi1 = dsi; | |
1646 | ||
1647 | drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs, | |
1648 | DRM_MODE_ENCODER_DSI, NULL); | |
1649 | drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs); | |
1650 | ||
1651 | dsi->connector = vc4_dsi_connector_init(drm, dsi); | |
1652 | if (IS_ERR(dsi->connector)) { | |
1653 | ret = PTR_ERR(dsi->connector); | |
1654 | goto err_destroy_encoder; | |
1655 | } | |
1656 | ||
1657 | dsi->dsi_host.ops = &vc4_dsi_host_ops; | |
1658 | dsi->dsi_host.dev = dev; | |
1659 | ||
1660 | mipi_dsi_host_register(&dsi->dsi_host); | |
1661 | ||
1662 | dev_set_drvdata(dev, dsi); | |
1663 | ||
1664 | pm_runtime_enable(dev); | |
1665 | ||
1666 | return 0; | |
1667 | ||
1668 | err_destroy_encoder: | |
1669 | vc4_dsi_encoder_destroy(dsi->encoder); | |
1670 | ||
1671 | return ret; | |
1672 | } | |
1673 | ||
/*
 * vc4_dsi_unbind - component unbind: tears down in the reverse order
 * of vc4_dsi_bind().  Allocations are devm-managed, so only the DRM
 * objects, the DSI host and the clocks need explicit teardown.
 */
static void vc4_dsi_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_dsi *dsi = dev_get_drvdata(dev);

	pm_runtime_disable(dev);

	vc4_dsi_connector_destroy(dsi->connector);
	vc4_dsi_encoder_destroy(dsi->encoder);

	mipi_dsi_host_unregister(&dsi->dsi_host);

	clk_disable_unprepare(dsi->pll_phy_clock);
	clk_disable_unprepare(dsi->escape_clock);

	/* Drop the global reference installed by bind. */
	if (dsi->port == 1)
		vc4->dsi1 = NULL;
}
1694 | ||
/* Component ops tying this device into the vc4 master driver. */
static const struct component_ops vc4_dsi_ops = {
	.bind = vc4_dsi_bind,
	.unbind = vc4_dsi_unbind,
};
1699 | ||
/* Platform probe just registers with the component framework; real
 * setup happens in vc4_dsi_bind() once the vc4 master binds.
 */
static int vc4_dsi_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_dsi_ops);
}
1704 | ||
/* Platform remove: unregister from the component framework (which
 * triggers vc4_dsi_unbind() if currently bound).
 */
static int vc4_dsi_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_dsi_ops);
	return 0;
}
1710 | ||
/* Platform driver glue; registered by the top-level vc4 driver. */
struct platform_driver vc4_dsi_driver = {
	.probe = vc4_dsi_dev_probe,
	.remove = vc4_dsi_dev_remove,
	.driver = {
		.name = "vc4_dsi",
		.of_match_table = vc4_dsi_dt_match,
	},
};