]>
Commit | Line | Data |
---|---|---|
44cfc623 GG |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | |
3 | * i.MX8 NWL MIPI DSI host driver | |
4 | * | |
5 | * Copyright (C) 2017 NXP | |
6 | * Copyright (C) 2020 Purism SPC | |
7 | */ | |
8 | ||
9 | #include <linux/bitfield.h> | |
29733e77 | 10 | #include <linux/bits.h> |
44cfc623 GG |
11 | #include <linux/clk.h> |
12 | #include <linux/irq.h> | |
13 | #include <linux/math64.h> | |
14 | #include <linux/mfd/syscon.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/mux/consumer.h> | |
17 | #include <linux/of.h> | |
18 | #include <linux/of_platform.h> | |
19 | #include <linux/phy/phy.h> | |
20 | #include <linux/regmap.h> | |
21 | #include <linux/reset.h> | |
22 | #include <linux/sys_soc.h> | |
23 | #include <linux/time64.h> | |
24 | ||
3afb2a28 | 25 | #include <drm/drm_atomic_state_helper.h> |
44cfc623 GG |
26 | #include <drm/drm_bridge.h> |
27 | #include <drm/drm_mipi_dsi.h> | |
28 | #include <drm/drm_of.h> | |
29 | #include <drm/drm_panel.h> | |
30 | #include <drm/drm_print.h> | |
31 | ||
32 | #include <video/mipi_display.h> | |
33 | ||
34 | #include "nwl-dsi.h" | |
35 | ||
36 | #define DRV_NAME "nwl-dsi" | |
37 | ||
38 | /* i.MX8 NWL quirks */ | |
39 | /* i.MX8MQ errata E11418 */ | |
40 | #define E11418_HS_MODE_QUIRK BIT(0) | |
41 | ||
42 | #define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500) | |
43 | ||
/* Direction of a DSI command transfer handled by nwl_dsi_host_transfer(). */
enum transfer_direction {
	DSI_PACKET_SEND,
	DSI_PACKET_RECEIVE,
};
48 | ||
49 | #define NWL_DSI_ENDPOINT_LCDIF 0 | |
50 | #define NWL_DSI_ENDPOINT_DCSS 1 | |
51 | ||
44cfc623 GG |
/*
 * State of one in-flight DSI command transfer. Lives on the stack of
 * nwl_dsi_host_transfer() and is completed from the IRQ handler via
 * nwl_dsi_finish_transmission().
 */
struct nwl_dsi_transfer {
	const struct mipi_dsi_msg *msg;
	struct mipi_dsi_packet packet;
	struct completion completed;	/* signalled when the transfer ends */

	int status; /* status of transmission */
	enum transfer_direction direction;
	bool need_bta;		/* request a bus turn-around after TX */
	u8 cmd;			/* first TX byte, kept for log messages */
	u16 rx_word_count;	/* word count parsed from the RX header */
	size_t tx_len; /* in bytes */
	size_t rx_len; /* in bytes */
};
65 | ||
/* Per-device driver state for the NWL DSI host/bridge. */
struct nwl_dsi {
	struct drm_bridge bridge;
	struct mipi_dsi_host dsi_host;
	struct drm_bridge *panel_bridge;
	struct device *dev;
	struct phy *phy;
	union phy_configure_opts phy_cfg;
	unsigned int quirks;	/* bitmask, e.g. E11418_HS_MODE_QUIRK */

	struct regmap *regmap;
	int irq;
	/*
	 * The DSI host controller needs this reset sequence according to NWL:
	 * 1. Deassert pclk reset to get access to DSI regs
	 * 2. Configure DSI Host and DPHY and enable DPHY
	 * 3. Deassert ESC and BYTE resets to allow host TX operations)
	 * 4. Send DSI cmds to configure peripheral (handled by panel drv)
	 * 5. Deassert DPI reset so DPI receives pixels and starts sending
	 *    DSI data
	 *
	 * TODO: Since panel_bridges do their DSI setup in enable we
	 * currently have 4. and 5. swapped.
	 */
	struct reset_control *rst_byte;
	struct reset_control *rst_esc;
	struct reset_control *rst_dpi;
	struct reset_control *rst_pclk;
	struct mux_control *mux;

	/* DSI clocks */
	struct clk *phy_ref_clk;
	struct clk *rx_esc_clk;
	struct clk *tx_esc_clk;
	struct clk *core_clk;
	/*
	 * hardware bug: the i.MX8MQ needs this clock on during reset
	 * even when not using LCDIF.
	 */
	struct clk *lcdif_clk;

	/* dsi lanes */
	u32 lanes;
	enum mipi_dsi_pixel_format format;
	struct drm_display_mode mode;
	unsigned long dsi_mode_flags;
	/* first regmap error seen; latched by nwl_dsi_write()/nwl_dsi_read() */
	int error;

	/* current transfer, NULL when idle; read from the IRQ handler */
	struct nwl_dsi_transfer *xfer;
};
115 | ||
/* 16-bit register addresses, 32-bit values, 4-byte stride. */
static const struct regmap_config nwl_dsi_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = NWL_DSI_IRQ_MASK2,
	.name = DRV_NAME,
};
123 | ||
124 | static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge) | |
125 | { | |
126 | return container_of(bridge, struct nwl_dsi, bridge); | |
127 | } | |
128 | ||
129 | static int nwl_dsi_clear_error(struct nwl_dsi *dsi) | |
130 | { | |
131 | int ret = dsi->error; | |
132 | ||
133 | dsi->error = 0; | |
134 | return ret; | |
135 | } | |
136 | ||
137 | static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val) | |
138 | { | |
139 | int ret; | |
140 | ||
141 | if (dsi->error) | |
142 | return; | |
143 | ||
144 | ret = regmap_write(dsi->regmap, reg, val); | |
145 | if (ret < 0) { | |
146 | DRM_DEV_ERROR(dsi->dev, | |
147 | "Failed to write NWL DSI reg 0x%x: %d\n", reg, | |
148 | ret); | |
149 | dsi->error = ret; | |
150 | } | |
151 | } | |
152 | ||
153 | static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg) | |
154 | { | |
155 | unsigned int val; | |
156 | int ret; | |
157 | ||
158 | if (dsi->error) | |
159 | return 0; | |
160 | ||
161 | ret = regmap_read(dsi->regmap, reg, &val); | |
162 | if (ret < 0) { | |
163 | DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n", | |
164 | reg, ret); | |
165 | dsi->error = ret; | |
166 | } | |
167 | return val; | |
168 | } | |
169 | ||
170 | static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format) | |
171 | { | |
172 | switch (format) { | |
173 | case MIPI_DSI_FMT_RGB565: | |
174 | return NWL_DSI_PIXEL_FORMAT_16; | |
175 | case MIPI_DSI_FMT_RGB666: | |
176 | return NWL_DSI_PIXEL_FORMAT_18L; | |
177 | case MIPI_DSI_FMT_RGB666_PACKED: | |
178 | return NWL_DSI_PIXEL_FORMAT_18; | |
179 | case MIPI_DSI_FMT_RGB888: | |
180 | return NWL_DSI_PIXEL_FORMAT_24; | |
181 | default: | |
182 | return -EINVAL; | |
183 | } | |
184 | } | |
185 | ||
186 | /* | |
187 | * ps2bc - Picoseconds to byte clock cycles | |
188 | */ | |
189 | static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps) | |
190 | { | |
191 | u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); | |
192 | ||
193 | return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp, | |
47956bc8 | 194 | dsi->lanes * 8ULL * NSEC_PER_SEC); |
44cfc623 GG |
195 | } |
196 | ||
197 | /* | |
198 | * ui2bc - UI time periods to byte clock cycles | |
199 | */ | |
29733e77 | 200 | static u32 ui2bc(unsigned int ui) |
44cfc623 | 201 | { |
29733e77 | 202 | return DIV_ROUND_UP(ui, BITS_PER_BYTE); |
44cfc623 GG |
203 | } |
204 | ||
/*
 * us2lp - microseconds to LP clock cycles
 */
static u32 us2lp(u32 lp_clk_rate, unsigned long us)
{
	return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
}
212 | ||
/*
 * Program the host side of the controller: lane count, clock mode and
 * the D-PHY derived timing values (converted to byte-clock or LP-clock
 * cycles). Returns 0 or the first latched register-access error.
 */
static int nwl_dsi_config_host(struct nwl_dsi *dsi)
{
	u32 cycles;
	struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;

	if (dsi->lanes < 1 || dsi->lanes > 4)
		return -EINVAL;

	DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
	/* Register holds lanes - 1 */
	nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);

	if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
		nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
		nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
	} else {
		nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
		nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
	}

	/* values in byte clock cycles */
	cycles = ui2bc(cfg->clk_pre);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
	cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
	/* T_POST also covers the clk_pre period */
	cycles += ui2bc(cfg->clk_pre);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
	cycles = ps2bc(dsi, cfg->hs_exit);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);

	nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
	/* Disable the HTX / LRX / BTA timeout counters */
	nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
	nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
	nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
	/* In LP clock cycles */
	cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);

	return nwl_dsi_clear_error(dsi);
}
256 | ||
/*
 * Program the DPI (pixel input) side of the controller from dsi->mode:
 * color coding, sync polarity, video mode (burst vs. sync pulses/events)
 * and all horizontal/vertical timing registers.
 * Returns 0 or the first latched register-access error.
 */
static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
{
	u32 mode;
	int color_format;
	bool burst_mode;
	int hfront_porch, hback_porch, vfront_porch, vback_porch;
	int hsync_len, vsync_len;

	/* Derive porch/sync lengths from the stored display mode. */
	hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
	hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
	hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;

	vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
	vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
	vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;

	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);

	color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
	if (color_format < 0) {
		DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
			      dsi->format);
		return color_format;
	}
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);

	nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
	nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
	/*
	 * Adjusting input polarity based on the video mode results in
	 * a black screen so always pick active low:
	 */
	nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
		      NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
	nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
		      NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);

	/* Burst only when requested and sync pulses are not required. */
	burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
		     !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);

	if (burst_mode) {
		nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
		nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
	} else {
		mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
				NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
				NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
		nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
		nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
			      dsi->mode.hdisplay);
	}

	nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
	nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
	nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);

	nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
	nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
	nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
	nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);

	nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
	/* VACTIVE register holds lines - 1 */
	nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
	nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
	nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);

	return nwl_dsi_clear_error(dsi);
}
333 | ||
334 | static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi) | |
335 | { | |
336 | u32 irq_enable; | |
337 | ||
338 | nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff); | |
339 | nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7); | |
340 | ||
341 | irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK | | |
342 | NWL_DSI_RX_PKT_HDR_RCVD_MASK | | |
343 | NWL_DSI_TX_FIFO_OVFLW_MASK | | |
344 | NWL_DSI_HS_TX_TIMEOUT_MASK); | |
345 | ||
346 | nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable); | |
347 | ||
348 | return nwl_dsi_clear_error(dsi); | |
349 | } | |
350 | ||
351 | static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host, | |
352 | struct mipi_dsi_device *device) | |
353 | { | |
354 | struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host); | |
355 | struct device *dev = dsi->dev; | |
356 | ||
357 | DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes, | |
358 | device->format, device->mode_flags); | |
359 | ||
360 | if (device->lanes < 1 || device->lanes > 4) | |
361 | return -EINVAL; | |
362 | ||
363 | dsi->lanes = device->lanes; | |
364 | dsi->format = device->format; | |
365 | dsi->dsi_mode_flags = device->mode_flags; | |
366 | ||
367 | return 0; | |
368 | } | |
369 | ||
/*
 * Parse RX data for the current transfer. Runs from the IRQ handler.
 *
 * Returns true when the transfer is finished (xfer->status then holds
 * the number of bytes received or a negative error), false when more
 * interrupts are needed (header seen but payload not yet received).
 */
static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
{
	struct device *dev = dsi->dev;
	struct nwl_dsi_transfer *xfer = dsi->xfer;
	int err;
	u8 *payload = xfer->msg->rx_buf;
	u32 val;
	u16 word_count;
	u8 channel;
	u8 data_type;

	xfer->status = 0;

	/* rx_word_count == 0 means the RX header has not been parsed yet. */
	if (xfer->rx_word_count == 0) {
		if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
			return false;
		/* Get the RX header and parse it */
		val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
		err = nwl_dsi_clear_error(dsi);
		if (err)
			xfer->status = err;
		word_count = NWL_DSI_WC(val);
		channel = NWL_DSI_RX_VC(val);
		data_type = NWL_DSI_RX_DT(val);

		if (channel != xfer->msg->channel) {
			DRM_DEV_ERROR(dev,
				      "[%02X] Channel mismatch (%u != %u)\n",
				      xfer->cmd, channel, xfer->msg->channel);
			xfer->status = -EINVAL;
			return true;
		}

		switch (data_type) {
		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
			/* Short responses carry their data in the header WC. */
			if (xfer->msg->rx_len > 1) {
				/* read second byte */
				payload[1] = word_count >> 8;
				++xfer->rx_len;
			}
			fallthrough;
		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
			if (xfer->msg->rx_len > 0) {
				/* read first byte */
				payload[0] = word_count & 0xff;
				++xfer->rx_len;
			}
			xfer->status = xfer->rx_len;
			return true;
		case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
			word_count &= 0xff;
			DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
				      xfer->cmd, word_count);
			xfer->status = -EPROTO;
			return true;
		}

		/* Long read response: WC is the payload byte count. */
		if (word_count > xfer->msg->rx_len) {
			DRM_DEV_ERROR(dev,
				      "[%02X] Receive buffer too small: %zu (< %u)\n",
				      xfer->cmd, xfer->msg->rx_len, word_count);
			xfer->status = -EINVAL;
			return true;
		}

		xfer->rx_word_count = word_count;
	} else {
		/* Set word_count from previous header read */
		word_count = xfer->rx_word_count;
	}

	/* If RX payload is not yet received, wait for it */
	if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
		return false;

	/* Read the RX payload */
	while (word_count >= 4) {
		val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
		payload[0] = (val >> 0) & 0xff;
		payload[1] = (val >> 8) & 0xff;
		payload[2] = (val >> 16) & 0xff;
		payload[3] = (val >> 24) & 0xff;
		payload += 4;
		xfer->rx_len += 4;
		word_count -= 4;
	}

	/* Unpack the trailing 1-3 bytes from the last FIFO word. */
	if (word_count > 0) {
		val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
		switch (word_count) {
		case 3:
			payload[2] = (val >> 16) & 0xff;
			++xfer->rx_len;
			fallthrough;
		case 2:
			payload[1] = (val >> 8) & 0xff;
			++xfer->rx_len;
			fallthrough;
		case 1:
			payload[0] = (val >> 0) & 0xff;
			++xfer->rx_len;
			break;
		}
	}

	xfer->status = xfer->rx_len;
	err = nwl_dsi_clear_error(dsi);
	if (err)
		xfer->status = err;

	return true;
}
484 | ||
485 | static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status) | |
486 | { | |
487 | struct nwl_dsi_transfer *xfer = dsi->xfer; | |
488 | bool end_packet = false; | |
489 | ||
490 | if (!xfer) | |
491 | return; | |
492 | ||
493 | if (xfer->direction == DSI_PACKET_SEND && | |
494 | status & NWL_DSI_TX_PKT_DONE) { | |
495 | xfer->status = xfer->tx_len; | |
496 | end_packet = true; | |
497 | } else if (status & NWL_DSI_DPHY_DIRECTION && | |
498 | ((status & (NWL_DSI_RX_PKT_HDR_RCVD | | |
499 | NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) { | |
500 | end_packet = nwl_dsi_read_packet(dsi, status); | |
501 | } | |
502 | ||
503 | if (end_packet) | |
504 | complete(&xfer->completed); | |
505 | } | |
506 | ||
/*
 * Queue the payload and header of dsi->xfer's packet into the TX FIFO
 * and trigger transmission. Completion is signalled via the
 * TX_PKT_DONE interrupt (see nwl_dsi_finish_transmission()).
 */
static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
{
	struct nwl_dsi_transfer *xfer = dsi->xfer;
	struct mipi_dsi_packet *pkt = &xfer->packet;
	const u8 *payload;
	size_t length;
	u16 word_count;
	u8 hs_mode;
	u32 val;
	u32 hs_workaround = 0;

	/* Send the payload, if any */
	length = pkt->payload_length;
	payload = pkt->payload;

	while (length >= 4) {
		val = *(u32 *)payload;
		/* E11418 quirk trigger: any word with bits 8-23 all zero */
		hs_workaround |= !(val & 0xFFFF00);
		nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
		payload += 4;
		length -= 4;
	}
	/* Send the rest of the payload */
	val = 0;
	switch (length) {
	case 3:
		val |= payload[2] << 16;
		fallthrough;
	case 2:
		val |= payload[1] << 8;
		hs_workaround |= !(val & 0xFFFF00);
		fallthrough;
	case 1:
		val |= payload[0];
		nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
		break;
	}
	xfer->tx_len = pkt->payload_length;

	/*
	 * Send the header
	 * header[0] = Virtual Channel + Data Type
	 * header[1] = Word Count LSB (LP) or first param (SP)
	 * header[2] = Word Count MSB (LP) or second param (SP)
	 */
	word_count = pkt->header[1] | (pkt->header[2] << 8);
	if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
		/* i.MX8MQ errata E11418: force HS mode for such payloads */
		DRM_DEV_DEBUG_DRIVER(dsi->dev,
				     "Using hs mode workaround for cmd 0x%x\n",
				     xfer->cmd);
		hs_mode = 1;
	} else {
		hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
	}
	val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
	      NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
	      NWL_DSI_BTA_TX(xfer->need_bta);
	nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);

	/* Send packet command */
	nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
}
569 | ||
570 | static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host, | |
571 | const struct mipi_dsi_msg *msg) | |
572 | { | |
573 | struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host); | |
574 | struct nwl_dsi_transfer xfer; | |
575 | ssize_t ret = 0; | |
576 | ||
577 | /* Create packet to be sent */ | |
578 | dsi->xfer = &xfer; | |
579 | ret = mipi_dsi_create_packet(&xfer.packet, msg); | |
580 | if (ret < 0) { | |
581 | dsi->xfer = NULL; | |
582 | return ret; | |
583 | } | |
584 | ||
585 | if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM || | |
586 | msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM || | |
587 | msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM || | |
588 | msg->type & MIPI_DSI_DCS_READ) && | |
589 | msg->rx_len > 0 && msg->rx_buf) | |
590 | xfer.direction = DSI_PACKET_RECEIVE; | |
591 | else | |
592 | xfer.direction = DSI_PACKET_SEND; | |
593 | ||
594 | xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE); | |
595 | xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0; | |
596 | xfer.msg = msg; | |
597 | xfer.status = -ETIMEDOUT; | |
598 | xfer.rx_word_count = 0; | |
599 | xfer.rx_len = 0; | |
600 | xfer.cmd = 0x00; | |
601 | if (msg->tx_len > 0) | |
602 | xfer.cmd = ((u8 *)(msg->tx_buf))[0]; | |
603 | init_completion(&xfer.completed); | |
604 | ||
605 | ret = clk_prepare_enable(dsi->rx_esc_clk); | |
606 | if (ret < 0) { | |
607 | DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n", | |
608 | ret); | |
609 | return ret; | |
610 | } | |
611 | DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n", | |
612 | clk_get_rate(dsi->rx_esc_clk)); | |
613 | ||
614 | /* Initiate the DSI packet transmision */ | |
615 | nwl_dsi_begin_transmission(dsi); | |
616 | ||
617 | if (!wait_for_completion_timeout(&xfer.completed, | |
618 | NWL_DSI_MIPI_FIFO_TIMEOUT)) { | |
619 | DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n", | |
620 | xfer.cmd); | |
621 | ret = -ETIMEDOUT; | |
622 | } else { | |
623 | ret = xfer.status; | |
624 | } | |
625 | ||
626 | clk_disable_unprepare(dsi->rx_esc_clk); | |
627 | ||
628 | return ret; | |
629 | } | |
630 | ||
/* DSI host operations exposed to attached mipi_dsi devices (panels). */
static const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
	.attach = nwl_dsi_host_attach,
	.transfer = nwl_dsi_host_transfer,
};
635 | ||
636 | static irqreturn_t nwl_dsi_irq_handler(int irq, void *data) | |
637 | { | |
638 | u32 irq_status; | |
639 | struct nwl_dsi *dsi = data; | |
640 | ||
641 | irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS); | |
642 | ||
643 | if (irq_status & NWL_DSI_TX_FIFO_OVFLW) | |
644 | DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n"); | |
645 | ||
646 | if (irq_status & NWL_DSI_HS_TX_TIMEOUT) | |
647 | DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n"); | |
648 | ||
649 | if (irq_status & NWL_DSI_TX_PKT_DONE || | |
650 | irq_status & NWL_DSI_RX_PKT_HDR_RCVD || | |
651 | irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD) | |
652 | nwl_dsi_finish_transmission(dsi, irq_status); | |
653 | ||
654 | return IRQ_HANDLED; | |
655 | } | |
656 | ||
/*
 * Bring up the PHY and program host + DPI registers for the stored
 * mode (dsi->mode / dsi->phy_cfg), power on the PHY and enable the
 * controller interrupts. On failure, everything acquired so far is
 * released via the goto chain below.
 */
static int nwl_dsi_mode_set(struct nwl_dsi *dsi)
{
	struct device *dev = dsi->dev;
	union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
	int ret;

	/* Lane count comes from nwl_dsi_host_attach(); 0 means no panel. */
	if (!dsi->lanes) {
		DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
		return -EINVAL;
	}

	ret = phy_init(dsi->phy);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
		return ret;
	}

	ret = phy_configure(dsi->phy, phy_cfg);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
		goto uninit_phy;
	}

	ret = clk_prepare_enable(dsi->tx_esc_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
			      ret);
		goto uninit_phy;
	}
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
			     clk_get_rate(dsi->tx_esc_clk));

	ret = nwl_dsi_config_host(dsi);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret);
		goto disable_clock;
	}

	ret = nwl_dsi_config_dpi(dsi);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret);
		goto disable_clock;
	}

	ret = phy_power_on(dsi->phy);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
		goto disable_clock;
	}

	ret = nwl_dsi_init_interrupts(dsi);
	if (ret < 0)
		goto power_off_phy;

	return ret;

power_off_phy:
	phy_power_off(dsi->phy);
disable_clock:
	clk_disable_unprepare(dsi->tx_esc_clk);
uninit_phy:
	phy_exit(dsi->phy);

	return ret;
}
722 | ||
723 | static int nwl_dsi_disable(struct nwl_dsi *dsi) | |
724 | { | |
725 | struct device *dev = dsi->dev; | |
726 | ||
727 | DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n"); | |
728 | ||
729 | phy_power_off(dsi->phy); | |
730 | phy_exit(dsi->phy); | |
731 | ||
732 | /* Disabling the clock before the phy breaks enabling dsi again */ | |
733 | clk_disable_unprepare(dsi->tx_esc_clk); | |
734 | ||
735 | return 0; | |
736 | } | |
737 | ||
3afb2a28 LY |
738 | static void |
739 | nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge, | |
740 | struct drm_bridge_state *old_bridge_state) | |
44cfc623 GG |
741 | { |
742 | struct nwl_dsi *dsi = bridge_to_dsi(bridge); | |
743 | int ret; | |
744 | ||
745 | nwl_dsi_disable(dsi); | |
746 | ||
747 | ret = reset_control_assert(dsi->rst_dpi); | |
748 | if (ret < 0) { | |
749 | DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret); | |
750 | return; | |
751 | } | |
752 | ret = reset_control_assert(dsi->rst_byte); | |
753 | if (ret < 0) { | |
754 | DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret); | |
755 | return; | |
756 | } | |
757 | ret = reset_control_assert(dsi->rst_esc); | |
758 | if (ret < 0) { | |
759 | DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret); | |
760 | return; | |
761 | } | |
762 | ret = reset_control_assert(dsi->rst_pclk); | |
763 | if (ret < 0) { | |
764 | DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret); | |
765 | return; | |
766 | } | |
767 | ||
768 | clk_disable_unprepare(dsi->core_clk); | |
769 | clk_disable_unprepare(dsi->lcdif_clk); | |
770 | ||
771 | pm_runtime_put(dsi->dev); | |
772 | } | |
773 | ||
774 | static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi, | |
775 | const struct drm_display_mode *mode, | |
776 | union phy_configure_opts *phy_opts) | |
777 | { | |
778 | unsigned long rate; | |
779 | int ret; | |
780 | ||
781 | if (dsi->lanes < 1 || dsi->lanes > 4) | |
782 | return -EINVAL; | |
783 | ||
784 | /* | |
785 | * So far the DPHY spec minimal timings work for both mixel | |
786 | * dphy and nwl dsi host | |
787 | */ | |
788 | ret = phy_mipi_dphy_get_default_config(mode->clock * 1000, | |
789 | mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes, | |
790 | &phy_opts->mipi_dphy); | |
791 | if (ret < 0) | |
792 | return ret; | |
793 | ||
794 | rate = clk_get_rate(dsi->tx_esc_clk); | |
795 | DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate); | |
796 | phy_opts->mipi_dphy.lp_clk_rate = rate; | |
797 | ||
798 | return 0; | |
799 | } | |
800 | ||
44cfc623 GG |
801 | static enum drm_mode_status |
802 | nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge, | |
12c683e1 | 803 | const struct drm_display_info *info, |
44cfc623 GG |
804 | const struct drm_display_mode *mode) |
805 | { | |
806 | struct nwl_dsi *dsi = bridge_to_dsi(bridge); | |
807 | int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); | |
808 | ||
809 | if (mode->clock * bpp > 15000000 * dsi->lanes) | |
810 | return MODE_CLOCK_HIGH; | |
811 | ||
812 | if (mode->clock * bpp < 80000 * dsi->lanes) | |
813 | return MODE_CLOCK_LOW; | |
814 | ||
815 | return MODE_OK; | |
816 | } | |
817 | ||
3afb2a28 LY |
818 | static int nwl_dsi_bridge_atomic_check(struct drm_bridge *bridge, |
819 | struct drm_bridge_state *bridge_state, | |
820 | struct drm_crtc_state *crtc_state, | |
821 | struct drm_connector_state *conn_state) | |
822 | { | |
823 | struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; | |
824 | ||
825 | /* At least LCDIF + NWL needs active high sync */ | |
826 | adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); | |
827 | adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC); | |
828 | ||
88581137 LY |
829 | /* |
830 | * Do a full modeset if crtc_state->active is changed to be true. | |
831 | * This ensures our ->mode_set() is called to get the DSI controller | |
832 | * and the PHY ready to send DCS commands, when only the connector's | |
833 | * DPMS is brought out of "Off" status. | |
834 | */ | |
3afb2a28 LY |
835 | if (crtc_state->active_changed && crtc_state->active) |
836 | crtc_state->mode_changed = true; | |
837 | ||
838 | return 0; | |
839 | } | |
840 | ||
44cfc623 GG |
841 | static void |
842 | nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, | |
843 | const struct drm_display_mode *mode, | |
844 | const struct drm_display_mode *adjusted_mode) | |
845 | { | |
846 | struct nwl_dsi *dsi = bridge_to_dsi(bridge); | |
847 | struct device *dev = dsi->dev; | |
848 | union phy_configure_opts new_cfg; | |
849 | unsigned long phy_ref_rate; | |
850 | int ret; | |
851 | ||
852 | ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg); | |
853 | if (ret < 0) | |
854 | return; | |
855 | ||
44cfc623 GG |
856 | phy_ref_rate = clk_get_rate(dsi->phy_ref_clk); |
857 | DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate); | |
858 | /* Save the new desired phy config */ | |
859 | memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg)); | |
860 | ||
861 | memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode)); | |
862 | drm_mode_debug_printmodeline(adjusted_mode); | |
44cfc623 | 863 | |
90abbf7a YL |
864 | if (pm_runtime_resume_and_get(dev) < 0) |
865 | return; | |
44cfc623 GG |
866 | |
867 | if (clk_prepare_enable(dsi->lcdif_clk) < 0) | |
90abbf7a | 868 | goto runtime_put; |
44cfc623 | 869 | if (clk_prepare_enable(dsi->core_clk) < 0) |
90abbf7a | 870 | goto runtime_put; |
44cfc623 GG |
871 | |
872 | /* Step 1 from DSI reset-out instructions */ | |
873 | ret = reset_control_deassert(dsi->rst_pclk); | |
874 | if (ret < 0) { | |
88581137 | 875 | DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret); |
90abbf7a | 876 | goto runtime_put; |
44cfc623 GG |
877 | } |
878 | ||
879 | /* Step 2 from DSI reset-out instructions */ | |
88581137 | 880 | nwl_dsi_mode_set(dsi); |
44cfc623 GG |
881 | |
882 | /* Step 3 from DSI reset-out instructions */ | |
883 | ret = reset_control_deassert(dsi->rst_esc); | |
884 | if (ret < 0) { | |
88581137 | 885 | DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret); |
90abbf7a | 886 | goto runtime_put; |
44cfc623 GG |
887 | } |
888 | ret = reset_control_deassert(dsi->rst_byte); | |
889 | if (ret < 0) { | |
88581137 | 890 | DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret); |
90abbf7a | 891 | goto runtime_put; |
44cfc623 | 892 | } |
90abbf7a YL |
893 | |
894 | return; | |
895 | ||
896 | runtime_put: | |
897 | pm_runtime_put_sync(dev); | |
44cfc623 GG |
898 | } |
899 | ||
3afb2a28 LY |
900 | static void |
901 | nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge, | |
902 | struct drm_bridge_state *old_bridge_state) | |
44cfc623 GG |
903 | { |
904 | struct nwl_dsi *dsi = bridge_to_dsi(bridge); | |
905 | int ret; | |
906 | ||
907 | /* Step 5 from DSI reset-out instructions */ | |
908 | ret = reset_control_deassert(dsi->rst_dpi); | |
909 | if (ret < 0) | |
910 | DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret); | |
911 | } | |
912 | ||
913 | static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, | |
914 | enum drm_bridge_attach_flags flags) | |
915 | { | |
916 | struct nwl_dsi *dsi = bridge_to_dsi(bridge); | |
917 | struct drm_bridge *panel_bridge; | |
918 | struct drm_panel *panel; | |
919 | int ret; | |
920 | ||
44cfc623 GG |
921 | ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel, |
922 | &panel_bridge); | |
923 | if (ret) | |
924 | return ret; | |
925 | ||
926 | if (panel) { | |
927 | panel_bridge = drm_panel_bridge_add(panel); | |
928 | if (IS_ERR(panel_bridge)) | |
929 | return PTR_ERR(panel_bridge); | |
930 | } | |
931 | dsi->panel_bridge = panel_bridge; | |
932 | ||
933 | if (!dsi->panel_bridge) | |
934 | return -EPROBE_DEFER; | |
935 | ||
936 | return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge, | |
937 | flags); | |
938 | } | |
939 | ||
940 | static void nwl_dsi_bridge_detach(struct drm_bridge *bridge) | |
941 | { struct nwl_dsi *dsi = bridge_to_dsi(bridge); | |
942 | ||
943 | drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0); | |
944 | } | |
945 | ||
494eb277 GG |
946 | static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, |
947 | struct drm_bridge_state *bridge_state, | |
948 | struct drm_crtc_state *crtc_state, | |
949 | struct drm_connector_state *conn_state, | |
950 | u32 output_fmt, | |
951 | unsigned int *num_input_fmts) | |
952 | { | |
953 | u32 *input_fmts, input_fmt; | |
954 | ||
955 | *num_input_fmts = 0; | |
956 | ||
957 | switch (output_fmt) { | |
958 | /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */ | |
959 | case MEDIA_BUS_FMT_FIXED: | |
960 | input_fmt = MEDIA_BUS_FMT_RGB888_1X24; | |
961 | break; | |
962 | case MEDIA_BUS_FMT_RGB888_1X24: | |
963 | case MEDIA_BUS_FMT_RGB666_1X18: | |
964 | case MEDIA_BUS_FMT_RGB565_1X16: | |
965 | input_fmt = output_fmt; | |
966 | break; | |
967 | default: | |
968 | return NULL; | |
969 | } | |
970 | ||
971 | input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL); | |
972 | if (!input_fmts) | |
973 | return NULL; | |
974 | input_fmts[0] = input_fmt; | |
975 | *num_input_fmts = 1; | |
976 | ||
977 | return input_fmts; | |
978 | } | |
979 | ||
44cfc623 | 980 | static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = { |
3afb2a28 LY |
981 | .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, |
982 | .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, | |
983 | .atomic_reset = drm_atomic_helper_bridge_reset, | |
984 | .atomic_check = nwl_dsi_bridge_atomic_check, | |
3afb2a28 LY |
985 | .atomic_enable = nwl_dsi_bridge_atomic_enable, |
986 | .atomic_disable = nwl_dsi_bridge_atomic_disable, | |
494eb277 | 987 | .atomic_get_input_bus_fmts = nwl_bridge_atomic_get_input_bus_fmts, |
3afb2a28 LY |
988 | .mode_set = nwl_dsi_bridge_mode_set, |
989 | .mode_valid = nwl_dsi_bridge_mode_valid, | |
990 | .attach = nwl_dsi_bridge_attach, | |
991 | .detach = nwl_dsi_bridge_detach, | |
44cfc623 GG |
992 | }; |
993 | ||
994 | static int nwl_dsi_parse_dt(struct nwl_dsi *dsi) | |
995 | { | |
996 | struct platform_device *pdev = to_platform_device(dsi->dev); | |
997 | struct clk *clk; | |
998 | void __iomem *base; | |
999 | int ret; | |
1000 | ||
1001 | dsi->phy = devm_phy_get(dsi->dev, "dphy"); | |
1002 | if (IS_ERR(dsi->phy)) { | |
1003 | ret = PTR_ERR(dsi->phy); | |
1004 | if (ret != -EPROBE_DEFER) | |
1005 | DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret); | |
1006 | return ret; | |
1007 | } | |
1008 | ||
1009 | clk = devm_clk_get(dsi->dev, "lcdif"); | |
1010 | if (IS_ERR(clk)) { | |
1011 | ret = PTR_ERR(clk); | |
1012 | DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n", | |
1013 | ret); | |
1014 | return ret; | |
1015 | } | |
1016 | dsi->lcdif_clk = clk; | |
1017 | ||
1018 | clk = devm_clk_get(dsi->dev, "core"); | |
1019 | if (IS_ERR(clk)) { | |
1020 | ret = PTR_ERR(clk); | |
1021 | DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n", | |
1022 | ret); | |
1023 | return ret; | |
1024 | } | |
1025 | dsi->core_clk = clk; | |
1026 | ||
1027 | clk = devm_clk_get(dsi->dev, "phy_ref"); | |
1028 | if (IS_ERR(clk)) { | |
1029 | ret = PTR_ERR(clk); | |
1030 | DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n", | |
1031 | ret); | |
1032 | return ret; | |
1033 | } | |
1034 | dsi->phy_ref_clk = clk; | |
1035 | ||
1036 | clk = devm_clk_get(dsi->dev, "rx_esc"); | |
1037 | if (IS_ERR(clk)) { | |
1038 | ret = PTR_ERR(clk); | |
1039 | DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n", | |
1040 | ret); | |
1041 | return ret; | |
1042 | } | |
1043 | dsi->rx_esc_clk = clk; | |
1044 | ||
1045 | clk = devm_clk_get(dsi->dev, "tx_esc"); | |
1046 | if (IS_ERR(clk)) { | |
1047 | ret = PTR_ERR(clk); | |
1048 | DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n", | |
1049 | ret); | |
1050 | return ret; | |
1051 | } | |
1052 | dsi->tx_esc_clk = clk; | |
1053 | ||
1054 | dsi->mux = devm_mux_control_get(dsi->dev, NULL); | |
1055 | if (IS_ERR(dsi->mux)) { | |
1056 | ret = PTR_ERR(dsi->mux); | |
1057 | if (ret != -EPROBE_DEFER) | |
1058 | DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret); | |
1059 | return ret; | |
1060 | } | |
1061 | ||
1062 | base = devm_platform_ioremap_resource(pdev, 0); | |
1063 | if (IS_ERR(base)) | |
1064 | return PTR_ERR(base); | |
1065 | ||
1066 | dsi->regmap = | |
1067 | devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config); | |
1068 | if (IS_ERR(dsi->regmap)) { | |
1069 | ret = PTR_ERR(dsi->regmap); | |
1070 | DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n", | |
1071 | ret); | |
1072 | return ret; | |
1073 | } | |
1074 | ||
1075 | dsi->irq = platform_get_irq(pdev, 0); | |
1076 | if (dsi->irq < 0) { | |
1077 | DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n", | |
1078 | dsi->irq); | |
1079 | return dsi->irq; | |
1080 | } | |
1081 | ||
1082 | dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk"); | |
1083 | if (IS_ERR(dsi->rst_pclk)) { | |
1084 | DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n", | |
1085 | PTR_ERR(dsi->rst_pclk)); | |
1086 | return PTR_ERR(dsi->rst_pclk); | |
1087 | } | |
1088 | dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte"); | |
1089 | if (IS_ERR(dsi->rst_byte)) { | |
1090 | DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n", | |
1091 | PTR_ERR(dsi->rst_byte)); | |
1092 | return PTR_ERR(dsi->rst_byte); | |
1093 | } | |
1094 | dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc"); | |
1095 | if (IS_ERR(dsi->rst_esc)) { | |
1096 | DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n", | |
1097 | PTR_ERR(dsi->rst_esc)); | |
1098 | return PTR_ERR(dsi->rst_esc); | |
1099 | } | |
1100 | dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi"); | |
1101 | if (IS_ERR(dsi->rst_dpi)) { | |
1102 | DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n", | |
1103 | PTR_ERR(dsi->rst_dpi)); | |
1104 | return PTR_ERR(dsi->rst_dpi); | |
1105 | } | |
1106 | return 0; | |
1107 | } | |
1108 | ||
1109 | static int nwl_dsi_select_input(struct nwl_dsi *dsi) | |
1110 | { | |
1111 | struct device_node *remote; | |
1112 | u32 use_dcss = 1; | |
1113 | int ret; | |
1114 | ||
1115 | remote = of_graph_get_remote_node(dsi->dev->of_node, 0, | |
1116 | NWL_DSI_ENDPOINT_LCDIF); | |
1117 | if (remote) { | |
1118 | use_dcss = 0; | |
1119 | } else { | |
1120 | remote = of_graph_get_remote_node(dsi->dev->of_node, 0, | |
1121 | NWL_DSI_ENDPOINT_DCSS); | |
1122 | if (!remote) { | |
1123 | DRM_DEV_ERROR(dsi->dev, | |
1124 | "No valid input endpoint found\n"); | |
1125 | return -EINVAL; | |
1126 | } | |
1127 | } | |
1128 | ||
1129 | DRM_DEV_INFO(dsi->dev, "Using %s as input source\n", | |
1130 | (use_dcss) ? "DCSS" : "LCDIF"); | |
1131 | ret = mux_control_try_select(dsi->mux, use_dcss); | |
1132 | if (ret < 0) | |
1133 | DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret); | |
1134 | ||
1135 | of_node_put(remote); | |
1136 | return ret; | |
1137 | } | |
1138 | ||
1139 | static int nwl_dsi_deselect_input(struct nwl_dsi *dsi) | |
1140 | { | |
1141 | int ret; | |
1142 | ||
1143 | ret = mux_control_deselect(dsi->mux); | |
1144 | if (ret < 0) | |
1145 | DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret); | |
1146 | ||
1147 | return ret; | |
1148 | } | |
1149 | ||
/* The data-enable signal on the bridge's input bus is active low. */
static const struct drm_bridge_timings nwl_dsi_timings = {
	.input_bus_flags = DRM_BUS_FLAG_DE_LOW,
};
1153 | ||
/* Device-tree match table for this driver. */
static const struct of_device_id nwl_dsi_dt_ids[] = {
	{ .compatible = "fsl,imx8mq-nwl-dsi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);
1159 | ||
/* SoC revisions needing workarounds: i.MX8MQ rev 2.0 has errata E11418. */
static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
	{ .soc_id = "i.MX8MQ", .revision = "2.0",
	  .data = (void *)E11418_HS_MODE_QUIRK },
	{ /* sentinel. */ },
};
1165 | ||
1166 | static int nwl_dsi_probe(struct platform_device *pdev) | |
1167 | { | |
1168 | struct device *dev = &pdev->dev; | |
1169 | const struct soc_device_attribute *attr; | |
1170 | struct nwl_dsi *dsi; | |
1171 | int ret; | |
1172 | ||
1173 | dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); | |
1174 | if (!dsi) | |
1175 | return -ENOMEM; | |
1176 | ||
1177 | dsi->dev = dev; | |
1178 | ||
1179 | ret = nwl_dsi_parse_dt(dsi); | |
1180 | if (ret) | |
1181 | return ret; | |
1182 | ||
1183 | ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0, | |
1184 | dev_name(dev), dsi); | |
1185 | if (ret < 0) { | |
1186 | DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq, | |
1187 | ret); | |
1188 | return ret; | |
1189 | } | |
1190 | ||
1191 | dsi->dsi_host.ops = &nwl_dsi_host_ops; | |
1192 | dsi->dsi_host.dev = dev; | |
1193 | ret = mipi_dsi_host_register(&dsi->dsi_host); | |
1194 | if (ret) { | |
1195 | DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret); | |
1196 | return ret; | |
1197 | } | |
1198 | ||
1199 | attr = soc_device_match(nwl_dsi_quirks_match); | |
1200 | if (attr) | |
1201 | dsi->quirks = (uintptr_t)attr->data; | |
1202 | ||
1203 | dsi->bridge.driver_private = dsi; | |
1204 | dsi->bridge.funcs = &nwl_dsi_bridge_funcs; | |
1205 | dsi->bridge.of_node = dev->of_node; | |
1206 | dsi->bridge.timings = &nwl_dsi_timings; | |
1207 | ||
1208 | dev_set_drvdata(dev, dsi); | |
1209 | pm_runtime_enable(dev); | |
1210 | ||
1211 | ret = nwl_dsi_select_input(dsi); | |
1212 | if (ret < 0) { | |
c7b615d2 | 1213 | pm_runtime_disable(dev); |
44cfc623 GG |
1214 | mipi_dsi_host_unregister(&dsi->dsi_host); |
1215 | return ret; | |
1216 | } | |
1217 | ||
1218 | drm_bridge_add(&dsi->bridge); | |
1219 | return 0; | |
1220 | } | |
1221 | ||
1222 | static int nwl_dsi_remove(struct platform_device *pdev) | |
1223 | { | |
1224 | struct nwl_dsi *dsi = platform_get_drvdata(pdev); | |
1225 | ||
1226 | nwl_dsi_deselect_input(dsi); | |
1227 | mipi_dsi_host_unregister(&dsi->dsi_host); | |
1228 | drm_bridge_remove(&dsi->bridge); | |
1229 | pm_runtime_disable(&pdev->dev); | |
1230 | return 0; | |
1231 | } | |
1232 | ||
/* Platform driver glue; matching is done via nwl_dsi_dt_ids. */
static struct platform_driver nwl_dsi_driver = {
	.probe		= nwl_dsi_probe,
	.remove		= nwl_dsi_remove,
	.driver		= {
		.of_match_table = nwl_dsi_dt_ids,
		.name	= DRV_NAME,
	},
};

module_platform_driver(nwl_dsi_driver);

MODULE_AUTHOR("NXP Semiconductor");
MODULE_AUTHOR("Purism SPC");
MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver");
MODULE_LICENSE("GPL"); /* GPLv2 or later */