/*
 * System Control and Power Interface (SCPI) Message Protocol driver
 *
 * SCPI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between the SCP's
 * Cortex M3 and the AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * DVFS for various power domains including the core/cluster, configuration
 * of certain system clocks, thermal sensors and many others.
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */
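
/*
 * Illustrative sketch only (not part of this driver): a consumer driver
 * would typically obtain the ops table exported below via get_scpi_ops()
 * and call through it once it is non-NULL, for example:
 *
 *        struct scpi_ops *ops = get_scpi_ops();
 *        unsigned long rate;
 *
 *        if (!ops)
 *                return -EPROBE_DEFER;
 *        rate = ops->clk_get_val(clk_id);
 *
 * where clk_id is a hypothetical clock identifier; real consumers obtain
 * their ids from the firmware/device-tree bindings.
 */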
27 | ||
28 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
29 | ||
30 | #include <linux/bitmap.h> | |
31 | #include <linux/device.h> | |
32 | #include <linux/err.h> | |
33 | #include <linux/export.h> | |
34 | #include <linux/io.h> | |
35 | #include <linux/kernel.h> | |
36 | #include <linux/list.h> | |
37 | #include <linux/mailbox_client.h> | |
38 | #include <linux/module.h> | |
39 | #include <linux/of_address.h> | |
40 | #include <linux/of_platform.h> | |
41 | #include <linux/printk.h> | |
42 | #include <linux/scpi_protocol.h> | |
43 | #include <linux/slab.h> | |
44 | #include <linux/sort.h> | |
45 | #include <linux/spinlock.h> | |
46 | ||
47 | #define CMD_ID_SHIFT 0 | |
48 | #define CMD_ID_MASK 0x7f | |
49 | #define CMD_TOKEN_ID_SHIFT 8 | |
50 | #define CMD_TOKEN_ID_MASK 0xff | |
51 | #define CMD_DATA_SIZE_SHIFT 16 | |
52 | #define CMD_DATA_SIZE_MASK 0x1ff | |
53 | #define CMD_LEGACY_DATA_SIZE_SHIFT 20 | |
54 | #define CMD_LEGACY_DATA_SIZE_MASK 0x1ff | |
55 | #define PACK_SCPI_CMD(cmd_id, tx_sz) \ | |
56 | ((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \ | |
57 | (((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT)) | |
58 | #define ADD_SCPI_TOKEN(cmd, token) \ | |
59 | ((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT)) | |
60 | #define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \ | |
61 | ((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \ | |
62 | (((tx_sz) & CMD_LEGACY_DATA_SIZE_MASK) << CMD_LEGACY_DATA_SIZE_SHIFT)) | |
63 | ||
64 | #define CMD_SIZE(cmd) (((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK) | |
65 | #define CMD_LEGACY_SIZE(cmd) (((cmd) >> CMD_LEGACY_DATA_SIZE_SHIFT) & \ | |
66 | CMD_LEGACY_DATA_SIZE_MASK) | |
67 | #define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK) | |
68 | #define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK) | |
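
/*
 * Worked example (arbitrary values): for cmd_id 0x17 with a 2-byte payload,
 * PACK_SCPI_CMD(0x17, 2) yields 0x00020017; ADD_SCPI_TOKEN() with token 0x5a
 * turns that into 0x00025a17. CMD_SIZE() then recovers 2 and
 * CMD_XTRACT_UNIQ() recovers 0x5a17, the token plus command id used to
 * match replies against pending requests.
 */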
69 | ||
70 | #define SCPI_SLOT 0 | |
71 | ||
72 | #define MAX_DVFS_DOMAINS 8 | |
73 | #define MAX_DVFS_OPPS 16 | |
74 | #define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16) | |
75 | #define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff) | |
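
/*
 * Example decode (arbitrary value): a dvfs_info header whose CPU-order value
 * is 0x00c80310 gives DVFS_LATENCY() = 0xc8 = 200 (us) and
 * DVFS_OPP_COUNT() = 3.
 */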
76 | ||
77 | #define PROTOCOL_REV_MINOR_BITS 16 | |
78 | #define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1) | |
79 | #define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS) | |
80 | #define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK) | |
81 | ||
82 | #define FW_REV_MAJOR_BITS 24 | |
83 | #define FW_REV_MINOR_BITS 16 | |
84 | #define FW_REV_PATCH_MASK ((1U << FW_REV_MINOR_BITS) - 1) | |
85 | #define FW_REV_MINOR_MASK ((1U << FW_REV_MAJOR_BITS) - 1) | |
86 | #define FW_REV_MAJOR(x) ((x) >> FW_REV_MAJOR_BITS) | |
87 | #define FW_REV_MINOR(x) (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS) | |
88 | #define FW_REV_PATCH(x) ((x) & FW_REV_PATCH_MASK) | |
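
/*
 * Example decode (arbitrary values): a protocol_version of 0x00010002 is
 * reported as 1.2 via PROTOCOL_REV_MAJOR/MINOR(), and a firmware_version of
 * 0x01020003 as 1.2.3 via FW_REV_MAJOR/MINOR/PATCH().
 */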
89 | ||
90 | #define MAX_RX_TIMEOUT (msecs_to_jiffies(30)) | |
91 | ||
92 | enum scpi_error_codes { | |
93 | SCPI_SUCCESS = 0, /* Success */ | |
94 | SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */ | |
95 | SCPI_ERR_ALIGN = 2, /* Invalid alignment */ | |
96 | SCPI_ERR_SIZE = 3, /* Invalid size */ | |
97 | SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */ | |
98 | SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */ | |
99 | SCPI_ERR_RANGE = 6, /* Value out of range */ | |
100 | SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */ | |
101 | SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */ | |
102 | SCPI_ERR_PWRSTATE = 9, /* Invalid power state */ | |
103 | SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */ | |
104 | SCPI_ERR_DEVICE = 11, /* Device error */ | |
105 | SCPI_ERR_BUSY = 12, /* Device busy */ | |
106 | SCPI_ERR_MAX | |
107 | }; | |
108 | ||
109 | /* SCPI Standard commands */ | |
110 | enum scpi_std_cmd { | |
111 | SCPI_CMD_INVALID = 0x00, | |
112 | SCPI_CMD_SCPI_READY = 0x01, | |
113 | SCPI_CMD_SCPI_CAPABILITIES = 0x02, | |
114 | SCPI_CMD_SET_CSS_PWR_STATE = 0x03, | |
115 | SCPI_CMD_GET_CSS_PWR_STATE = 0x04, | |
116 | SCPI_CMD_SET_SYS_PWR_STATE = 0x05, | |
117 | SCPI_CMD_SET_CPU_TIMER = 0x06, | |
118 | SCPI_CMD_CANCEL_CPU_TIMER = 0x07, | |
119 | SCPI_CMD_DVFS_CAPABILITIES = 0x08, | |
120 | SCPI_CMD_GET_DVFS_INFO = 0x09, | |
121 | SCPI_CMD_SET_DVFS = 0x0a, | |
122 | SCPI_CMD_GET_DVFS = 0x0b, | |
123 | SCPI_CMD_GET_DVFS_STAT = 0x0c, | |
124 | SCPI_CMD_CLOCK_CAPABILITIES = 0x0d, | |
125 | SCPI_CMD_GET_CLOCK_INFO = 0x0e, | |
126 | SCPI_CMD_SET_CLOCK_VALUE = 0x0f, | |
127 | SCPI_CMD_GET_CLOCK_VALUE = 0x10, | |
128 | SCPI_CMD_PSU_CAPABILITIES = 0x11, | |
129 | SCPI_CMD_GET_PSU_INFO = 0x12, | |
130 | SCPI_CMD_SET_PSU = 0x13, | |
131 | SCPI_CMD_GET_PSU = 0x14, | |
132 | SCPI_CMD_SENSOR_CAPABILITIES = 0x15, | |
133 | SCPI_CMD_SENSOR_INFO = 0x16, | |
134 | SCPI_CMD_SENSOR_VALUE = 0x17, | |
135 | SCPI_CMD_SENSOR_CFG_PERIODIC = 0x18, | |
136 | SCPI_CMD_SENSOR_CFG_BOUNDS = 0x19, | |
137 | SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1a, | |
138 | SCPI_CMD_SET_DEVICE_PWR_STATE = 0x1b, | |
139 | SCPI_CMD_GET_DEVICE_PWR_STATE = 0x1c, | |
140 | SCPI_CMD_COUNT | |
141 | }; | |
142 | ||
143 | /* SCPI Legacy Commands */ | |
144 | enum legacy_scpi_std_cmd { | |
145 | LEGACY_SCPI_CMD_INVALID = 0x00, | |
146 | LEGACY_SCPI_CMD_SCPI_READY = 0x01, | |
147 | LEGACY_SCPI_CMD_SCPI_CAPABILITIES = 0x02, | |
148 | LEGACY_SCPI_CMD_EVENT = 0x03, | |
149 | LEGACY_SCPI_CMD_SET_CSS_PWR_STATE = 0x04, | |
150 | LEGACY_SCPI_CMD_GET_CSS_PWR_STATE = 0x05, | |
151 | LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT = 0x06, | |
152 | LEGACY_SCPI_CMD_GET_PWR_STATE_STAT = 0x07, | |
153 | LEGACY_SCPI_CMD_SYS_PWR_STATE = 0x08, | |
154 | LEGACY_SCPI_CMD_L2_READY = 0x09, | |
155 | LEGACY_SCPI_CMD_SET_AP_TIMER = 0x0a, | |
156 | LEGACY_SCPI_CMD_CANCEL_AP_TIME = 0x0b, | |
157 | LEGACY_SCPI_CMD_DVFS_CAPABILITIES = 0x0c, | |
158 | LEGACY_SCPI_CMD_GET_DVFS_INFO = 0x0d, | |
159 | LEGACY_SCPI_CMD_SET_DVFS = 0x0e, | |
160 | LEGACY_SCPI_CMD_GET_DVFS = 0x0f, | |
161 | LEGACY_SCPI_CMD_GET_DVFS_STAT = 0x10, | |
162 | LEGACY_SCPI_CMD_SET_RTC = 0x11, | |
163 | LEGACY_SCPI_CMD_GET_RTC = 0x12, | |
164 | LEGACY_SCPI_CMD_CLOCK_CAPABILITIES = 0x13, | |
165 | LEGACY_SCPI_CMD_SET_CLOCK_INDEX = 0x14, | |
166 | LEGACY_SCPI_CMD_SET_CLOCK_VALUE = 0x15, | |
167 | LEGACY_SCPI_CMD_GET_CLOCK_VALUE = 0x16, | |
168 | LEGACY_SCPI_CMD_PSU_CAPABILITIES = 0x17, | |
169 | LEGACY_SCPI_CMD_SET_PSU = 0x18, | |
170 | LEGACY_SCPI_CMD_GET_PSU = 0x19, | |
171 | LEGACY_SCPI_CMD_SENSOR_CAPABILITIES = 0x1a, | |
172 | LEGACY_SCPI_CMD_SENSOR_INFO = 0x1b, | |
173 | LEGACY_SCPI_CMD_SENSOR_VALUE = 0x1c, | |
174 | LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d, | |
175 | LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e, | |
176 | LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f, | |
177 | LEGACY_SCPI_CMD_COUNT | |
178 | }; | |
179 | ||
180 | /* List all commands that are required to go through the high priority link */ | |
181 | static int legacy_hpriority_cmds[] = { | |
182 | LEGACY_SCPI_CMD_GET_CSS_PWR_STATE, | |
183 | LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT, | |
184 | LEGACY_SCPI_CMD_GET_PWR_STATE_STAT, | |
185 | LEGACY_SCPI_CMD_SET_DVFS, | |
186 | LEGACY_SCPI_CMD_GET_DVFS, | |
187 | LEGACY_SCPI_CMD_SET_RTC, | |
188 | LEGACY_SCPI_CMD_GET_RTC, | |
189 | LEGACY_SCPI_CMD_SET_CLOCK_INDEX, | |
190 | LEGACY_SCPI_CMD_SET_CLOCK_VALUE, | |
191 | LEGACY_SCPI_CMD_GET_CLOCK_VALUE, | |
192 | LEGACY_SCPI_CMD_SET_PSU, | |
193 | LEGACY_SCPI_CMD_GET_PSU, | |
194 | LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC, | |
195 | LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS, | |
196 | }; | |
197 | ||
198 | /* List all commands used by this driver, used as indexes */ | |
199 | enum scpi_drv_cmds { | |
200 | CMD_SCPI_CAPABILITIES = 0, | |
201 | CMD_GET_CLOCK_INFO, | |
202 | CMD_GET_CLOCK_VALUE, | |
203 | CMD_SET_CLOCK_VALUE, | |
204 | CMD_GET_DVFS, | |
205 | CMD_SET_DVFS, | |
206 | CMD_GET_DVFS_INFO, | |
207 | CMD_SENSOR_CAPABILITIES, | |
208 | CMD_SENSOR_INFO, | |
209 | CMD_SENSOR_VALUE, | |
210 | CMD_SET_DEVICE_PWR_STATE, | |
211 | CMD_GET_DEVICE_PWR_STATE, | |
212 | CMD_MAX_COUNT, | |
213 | }; | |
214 | ||
215 | static int scpi_std_commands[CMD_MAX_COUNT] = { | |
216 | SCPI_CMD_SCPI_CAPABILITIES, | |
217 | SCPI_CMD_GET_CLOCK_INFO, | |
218 | SCPI_CMD_GET_CLOCK_VALUE, | |
219 | SCPI_CMD_SET_CLOCK_VALUE, | |
220 | SCPI_CMD_GET_DVFS, | |
221 | SCPI_CMD_SET_DVFS, | |
222 | SCPI_CMD_GET_DVFS_INFO, | |
223 | SCPI_CMD_SENSOR_CAPABILITIES, | |
224 | SCPI_CMD_SENSOR_INFO, | |
225 | SCPI_CMD_SENSOR_VALUE, | |
226 | SCPI_CMD_SET_DEVICE_PWR_STATE, | |
227 | SCPI_CMD_GET_DEVICE_PWR_STATE, | |
228 | }; | |
229 | ||
230 | static int scpi_legacy_commands[CMD_MAX_COUNT] = { | |
231 | LEGACY_SCPI_CMD_SCPI_CAPABILITIES, | |
232 | -1, /* GET_CLOCK_INFO */ | |
233 | LEGACY_SCPI_CMD_GET_CLOCK_VALUE, | |
234 | LEGACY_SCPI_CMD_SET_CLOCK_VALUE, | |
235 | LEGACY_SCPI_CMD_GET_DVFS, | |
236 | LEGACY_SCPI_CMD_SET_DVFS, | |
237 | LEGACY_SCPI_CMD_GET_DVFS_INFO, | |
238 | LEGACY_SCPI_CMD_SENSOR_CAPABILITIES, | |
239 | LEGACY_SCPI_CMD_SENSOR_INFO, | |
240 | LEGACY_SCPI_CMD_SENSOR_VALUE, | |
241 | -1, /* SET_DEVICE_PWR_STATE */ | |
242 | -1, /* GET_DEVICE_PWR_STATE */ | |
243 | }; | |
244 | ||
245 | struct scpi_xfer { | |
246 | u32 slot; /* has to be first element */ | |
247 | u32 cmd; | |
248 | u32 status; | |
249 | const void *tx_buf; | |
250 | void *rx_buf; | |
251 | unsigned int tx_len; | |
252 | unsigned int rx_len; | |
253 | struct list_head node; | |
254 | struct completion done; | |
255 | }; | |
256 | ||
257 | struct scpi_chan { | |
258 | struct mbox_client cl; | |
259 | struct mbox_chan *chan; | |
260 | void __iomem *tx_payload; | |
261 | void __iomem *rx_payload; | |
262 | struct list_head rx_pending; | |
263 | struct list_head xfers_list; | |
264 | struct scpi_xfer *xfers; | |
265 | spinlock_t rx_lock; /* locking for the rx pending list */ | |
266 | struct mutex xfers_lock; | |
267 | u8 token; | |
268 | }; | |
269 | ||
270 | struct scpi_drvinfo { | |
271 | u32 protocol_version; | |
272 | u32 firmware_version; | |
273 | bool is_legacy; | |
274 | int num_chans; | |
275 | int *commands; | |
276 | DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT); | |
277 | atomic_t next_chan; | |
278 | struct scpi_ops *scpi_ops; | |
279 | struct scpi_chan *channels; | |
280 | struct scpi_dvfs_info *dvfs[MAX_DVFS_DOMAINS]; | |
281 | }; | |
282 | ||
283 | /* | |
284 | * The SCP firmware only executes in little-endian mode, so any buffers | |
285 | * shared through SCPI should have their contents converted to little-endian | |
286 | */ | |
287 | struct scpi_shared_mem { | |
288 | __le32 command; | |
289 | __le32 status; | |
290 | u8 payload[0]; | |
291 | } __packed; | |
292 | ||
293 | struct legacy_scpi_shared_mem { | |
294 | __le32 status; | |
295 | u8 payload[0]; | |
296 | } __packed; | |
297 | ||
298 | struct scp_capabilities { | |
299 | __le32 protocol_version; | |
300 | __le32 event_version; | |
301 | __le32 platform_version; | |
302 | __le32 commands[4]; | |
303 | } __packed; | |
304 | ||
305 | struct clk_get_info { | |
306 | __le16 id; | |
307 | __le16 flags; | |
308 | __le32 min_rate; | |
309 | __le32 max_rate; | |
310 | u8 name[20]; | |
311 | } __packed; | |
312 | ||
313 | struct clk_get_value { | |
314 | __le32 rate; | |
315 | } __packed; | |
316 | ||
317 | struct clk_set_value { | |
318 | __le16 id; | |
319 | __le16 reserved; | |
320 | __le32 rate; | |
321 | } __packed; | |
322 | ||
323 | struct legacy_clk_set_value { | |
324 | __le32 rate; | |
325 | __le16 id; | |
326 | __le16 reserved; | |
327 | } __packed; | |
328 | ||
329 | struct dvfs_info { | |
330 | __le32 header; | |
331 | struct { | |
332 | __le32 freq; | |
333 | __le32 m_volt; | |
334 | } opps[MAX_DVFS_OPPS]; | |
335 | } __packed; | |
336 | ||
337 | struct dvfs_set { | |
338 | u8 domain; | |
339 | u8 index; | |
340 | } __packed; | |
341 | ||
342 | struct sensor_capabilities { | |
343 | __le16 sensors; | |
344 | } __packed; | |
345 | ||
346 | struct _scpi_sensor_info { | |
347 | __le16 sensor_id; | |
348 | u8 class; | |
349 | u8 trigger_type; | |
350 | char name[20]; | |
351 | }; | |
352 | ||
353 | struct sensor_value { | |
354 | __le32 lo_val; | |
355 | __le32 hi_val; | |
356 | } __packed; | |
357 | ||
358 | struct dev_pstate_set { | |
359 | u16 dev_id; | |
360 | u8 pstate; | |
361 | } __packed; | |
362 | ||
363 | static struct scpi_drvinfo *scpi_info; | |
364 | ||
365 | static int scpi_linux_errmap[SCPI_ERR_MAX] = { | |
366 | /* better than switch case as long as return value is continuous */ | |
367 | 0, /* SCPI_SUCCESS */ | |
368 | -EINVAL, /* SCPI_ERR_PARAM */ | |
369 | -ENOEXEC, /* SCPI_ERR_ALIGN */ | |
370 | -EMSGSIZE, /* SCPI_ERR_SIZE */ | |
371 | -EINVAL, /* SCPI_ERR_HANDLER */ | |
372 | -EACCES, /* SCPI_ERR_ACCESS */ | |
373 | -ERANGE, /* SCPI_ERR_RANGE */ | |
374 | -ETIMEDOUT, /* SCPI_ERR_TIMEOUT */ | |
375 | -ENOMEM, /* SCPI_ERR_NOMEM */ | |
376 | -EINVAL, /* SCPI_ERR_PWRSTATE */ | |
377 | -EOPNOTSUPP, /* SCPI_ERR_SUPPORT */ | |
378 | -EIO, /* SCPI_ERR_DEVICE */ | |
379 | -EBUSY, /* SCPI_ERR_BUSY */ | |
380 | }; | |
381 | ||
382 | static inline int scpi_to_linux_errno(int errno) | |
383 | { | |
384 | if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX) | |
385 | return scpi_linux_errmap[errno]; | |
386 | return -EIO; | |
387 | } | |
388 | ||
389 | static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd) | |
390 | { | |
391 | unsigned long flags; | |
392 | struct scpi_xfer *t, *match = NULL; | |
393 | ||
394 | spin_lock_irqsave(&ch->rx_lock, flags); | |
395 | if (list_empty(&ch->rx_pending)) { | |
396 | spin_unlock_irqrestore(&ch->rx_lock, flags); | |
397 | return; | |
398 | } | |
399 | ||
	/*
	 * The command type is not replied by the SCP firmware in legacy mode,
	 * so treat the head of the pending RX list as the matching command
	 * if the list is not empty. In TX-only mode, the list would be empty.
	 */
	if (scpi_info->is_legacy) {
		match = list_first_entry(&ch->rx_pending, struct scpi_xfer,
					 node);
		list_del(&match->node);
	} else {
		list_for_each_entry(t, &ch->rx_pending, node)
			if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
				list_del(&t->node);
				match = t;
				break;
			}
	}
	/* check if wait_for_completion is in progress or timed-out */
	if (match && !completion_done(&match->done)) {
		unsigned int len;

		if (scpi_info->is_legacy) {
			struct legacy_scpi_shared_mem *mem = ch->rx_payload;

			/* RX length is not replied by the legacy firmware */
			len = match->rx_len;

			match->status = le32_to_cpu(mem->status);
			memcpy_fromio(match->rx_buf, mem->payload, len);
		} else {
			struct scpi_shared_mem *mem = ch->rx_payload;

			len = min(match->rx_len, CMD_SIZE(cmd));

			match->status = le32_to_cpu(mem->status);
			memcpy_fromio(match->rx_buf, mem->payload, len);
		}

		if (match->rx_len > len)
			memset(match->rx_buf + len, 0, match->rx_len - len);
		complete(&match->done);
	}
	spin_unlock_irqrestore(&ch->rx_lock, flags);
}

static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
	struct scpi_shared_mem *mem = ch->rx_payload;
	u32 cmd = 0;

	if (!scpi_info->is_legacy)
		cmd = le32_to_cpu(mem->command);

	scpi_process_cmd(ch, cmd);
}

static void scpi_tx_prepare(struct mbox_client *c, void *msg)
{
	unsigned long flags;
	struct scpi_xfer *t = msg;
	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
	struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;

	if (t->tx_buf) {
		if (scpi_info->is_legacy)
			memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
		else
			memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
	}

	if (t->rx_buf) {
		if (!(++ch->token))
			++ch->token;
		ADD_SCPI_TOKEN(t->cmd, ch->token);
		spin_lock_irqsave(&ch->rx_lock, flags);
		list_add_tail(&t->node, &ch->rx_pending);
		spin_unlock_irqrestore(&ch->rx_lock, flags);
	}

	if (!scpi_info->is_legacy)
		mem->command = cpu_to_le32(t->cmd);
}

static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
{
	struct scpi_xfer *t;

	mutex_lock(&ch->xfers_lock);
	if (list_empty(&ch->xfers_list)) {
		mutex_unlock(&ch->xfers_lock);
		return NULL;
	}
	t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
	list_del(&t->node);
	mutex_unlock(&ch->xfers_lock);
	return t;
}

static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
{
	mutex_lock(&ch->xfers_lock);
	list_add_tail(&t->node, &ch->xfers_list);
	mutex_unlock(&ch->xfers_lock);
}

static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
			     void *rx_buf, unsigned int rx_len)
{
	int ret;
	u8 chan;
	u8 cmd;
	struct scpi_xfer *msg;
	struct scpi_chan *scpi_chan;

	if (scpi_info->commands[idx] < 0)
		return -EOPNOTSUPP;

	cmd = scpi_info->commands[idx];

	if (scpi_info->is_legacy)
		chan = test_bit(cmd, scpi_info->cmd_priority) ? 1 : 0;
	else
		chan = atomic_inc_return(&scpi_info->next_chan) %
			scpi_info->num_chans;
	scpi_chan = scpi_info->channels + chan;

	msg = get_scpi_xfer(scpi_chan);
	if (!msg)
		return -ENOMEM;

	if (scpi_info->is_legacy) {
		msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len);
		msg->slot = msg->cmd;
	} else {
		msg->slot = BIT(SCPI_SLOT);
		msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
	}
	msg->tx_buf = tx_buf;
	msg->tx_len = tx_len;
	msg->rx_buf = rx_buf;
	msg->rx_len = rx_len;
	init_completion(&msg->done);

	ret = mbox_send_message(scpi_chan->chan, msg);
	if (ret < 0 || !rx_buf)
		goto out;

	if (!wait_for_completion_timeout(&msg->done, MAX_RX_TIMEOUT))
		ret = -ETIMEDOUT;
	else
		/* first status word */
		ret = msg->status;
out:
	if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
		scpi_process_cmd(scpi_chan, msg->cmd);

	put_scpi_xfer(msg, scpi_chan);
	/* SCPI error codes > 0, translate them to Linux scale */
	return ret > 0 ? scpi_to_linux_errno(ret) : ret;
}

static u32 scpi_get_version(void)
{
	return scpi_info->protocol_version;
}

static int
scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
{
	int ret;
	struct clk_get_info clk;
	__le16 le_clk_id = cpu_to_le16(clk_id);

	ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
				sizeof(le_clk_id), &clk, sizeof(clk));
	if (!ret) {
		*min = le32_to_cpu(clk.min_rate);
		*max = le32_to_cpu(clk.max_rate);
	}
	return ret;
}

static unsigned long scpi_clk_get_val(u16 clk_id)
{
	int ret;
	struct clk_get_value clk;
	__le16 le_clk_id = cpu_to_le16(clk_id);

	ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
				sizeof(le_clk_id), &clk, sizeof(clk));

	return ret ? ret : le32_to_cpu(clk.rate);
}

static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
{
	int stat;
	struct clk_set_value clk = {
		.id = cpu_to_le16(clk_id),
		.rate = cpu_to_le32(rate)
	};

	return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
				 &stat, sizeof(stat));
}

static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate)
{
	int stat;
	struct legacy_clk_set_value clk = {
		.id = cpu_to_le16(clk_id),
		.rate = cpu_to_le32(rate)
	};

	return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
				 &stat, sizeof(stat));
}

static int scpi_dvfs_get_idx(u8 domain)
{
	int ret;
	u8 dvfs_idx;

	ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain),
				&dvfs_idx, sizeof(dvfs_idx));

	return ret ? ret : dvfs_idx;
}

static int scpi_dvfs_set_idx(u8 domain, u8 index)
{
	int stat;
	struct dvfs_set dvfs = {domain, index};

	return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs),
				 &stat, sizeof(stat));
}

static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scpi_opp *t1 = opp1, *t2 = opp2;

	return t1->freq - t2->freq;
}

static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
{
	struct scpi_dvfs_info *info;
	struct scpi_opp *opp;
	struct dvfs_info buf;
	int ret, i;

	if (domain >= MAX_DVFS_DOMAINS)
		return ERR_PTR(-EINVAL);

	if (scpi_info->dvfs[domain]) /* data already populated */
		return scpi_info->dvfs[domain];

	ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
				&buf, sizeof(buf));
	if (ret)
		return ERR_PTR(ret);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->count = DVFS_OPP_COUNT(buf.header);
	info->latency = DVFS_LATENCY(buf.header) * 1000; /* us to ns */
669 | ||
670 | info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL); | |
671 | if (!info->opps) { | |
672 | kfree(info); | |
673 | return ERR_PTR(-ENOMEM); | |
674 | } | |
675 | ||
676 | for (i = 0, opp = info->opps; i < info->count; i++, opp++) { | |
677 | opp->freq = le32_to_cpu(buf.opps[i].freq); | |
678 | opp->m_volt = le32_to_cpu(buf.opps[i].m_volt); | |
679 | } | |
680 | ||
681 | sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL); | |
682 | ||
683 | scpi_info->dvfs[domain] = info; | |
684 | return info; | |
685 | } | |
686 | ||
687 | static int scpi_sensor_get_capability(u16 *sensors) | |
688 | { | |
689 | struct sensor_capabilities cap_buf; | |
690 | int ret; | |
691 | ||
692 | ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf, | |
693 | sizeof(cap_buf)); | |
694 | if (!ret) | |
695 | *sensors = le16_to_cpu(cap_buf.sensors); | |
696 | ||
697 | return ret; | |
698 | } | |
699 | ||
700 | static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info) | |
701 | { | |
702 | __le16 id = cpu_to_le16(sensor_id); | |
703 | struct _scpi_sensor_info _info; | |
704 | int ret; | |
705 | ||
706 | ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id), | |
707 | &_info, sizeof(_info)); | |
708 | if (!ret) { | |
709 | memcpy(info, &_info, sizeof(*info)); | |
710 | info->sensor_id = le16_to_cpu(_info.sensor_id); | |
711 | } | |
712 | ||
713 | return ret; | |
714 | } | |
715 | ||
716 | static int scpi_sensor_get_value(u16 sensor, u64 *val) | |
717 | { | |
718 | __le16 id = cpu_to_le16(sensor); | |
719 | struct sensor_value buf; | |
720 | int ret; | |
721 | ||
722 | ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id), | |
723 | &buf, sizeof(buf)); | |
724 | if (ret) | |
725 | return ret; | |
726 | ||
727 | if (scpi_info->is_legacy) | |
728 | /* only 32-bits supported, hi_val can be junk */ | |
729 | *val = le32_to_cpu(buf.lo_val); | |
730 | else | |
731 | *val = (u64)le32_to_cpu(buf.hi_val) << 32 | | |
732 | le32_to_cpu(buf.lo_val); | |
733 | ||
734 | return 0; | |
735 | } | |
736 | ||
737 | static int scpi_device_get_power_state(u16 dev_id) | |
738 | { | |
739 | int ret; | |
740 | u8 pstate; | |
741 | __le16 id = cpu_to_le16(dev_id); | |
742 | ||
743 | ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id, | |
744 | sizeof(id), &pstate, sizeof(pstate)); | |
745 | return ret ? ret : pstate; | |
746 | } | |
747 | ||
748 | static int scpi_device_set_power_state(u16 dev_id, u8 pstate) | |
749 | { | |
750 | int stat; | |
751 | struct dev_pstate_set dev_set = { | |
752 | .dev_id = cpu_to_le16(dev_id), | |
753 | .pstate = pstate, | |
754 | }; | |
755 | ||
756 | return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set, | |
757 | sizeof(dev_set), &stat, sizeof(stat)); | |
758 | } | |
759 | ||
760 | static struct scpi_ops scpi_ops = { | |
761 | .get_version = scpi_get_version, | |
762 | .clk_get_range = scpi_clk_get_range, | |
763 | .clk_get_val = scpi_clk_get_val, | |
764 | .clk_set_val = scpi_clk_set_val, | |
765 | .dvfs_get_idx = scpi_dvfs_get_idx, | |
766 | .dvfs_set_idx = scpi_dvfs_set_idx, | |
767 | .dvfs_get_info = scpi_dvfs_get_info, | |
768 | .sensor_get_capability = scpi_sensor_get_capability, | |
769 | .sensor_get_info = scpi_sensor_get_info, | |
770 | .sensor_get_value = scpi_sensor_get_value, | |
771 | .device_get_power_state = scpi_device_get_power_state, | |
772 | .device_set_power_state = scpi_device_set_power_state, | |
773 | }; | |
774 | ||
775 | struct scpi_ops *get_scpi_ops(void) | |
776 | { | |
777 | return scpi_info ? scpi_info->scpi_ops : NULL; | |
778 | } | |
779 | EXPORT_SYMBOL_GPL(get_scpi_ops); | |
780 | ||
781 | static int scpi_init_versions(struct scpi_drvinfo *info) | |
782 | { | |
783 | int ret; | |
784 | struct scp_capabilities caps; | |
785 | ||
786 | ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0, | |
787 | &caps, sizeof(caps)); | |
788 | if (!ret) { | |
789 | info->protocol_version = le32_to_cpu(caps.protocol_version); | |
790 | info->firmware_version = le32_to_cpu(caps.platform_version); | |
791 | } | |
792 | /* Ignore error if not implemented */ | |
793 | if (scpi_info->is_legacy && ret == -EOPNOTSUPP) | |
794 | return 0; | |
795 | ||
796 | return ret; | |
797 | } | |
798 | ||
799 | static ssize_t protocol_version_show(struct device *dev, | |
800 | struct device_attribute *attr, char *buf) | |
801 | { | |
802 | struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev); | |
803 | ||
804 | return sprintf(buf, "%d.%d\n", | |
805 | PROTOCOL_REV_MAJOR(scpi_info->protocol_version), | |
806 | PROTOCOL_REV_MINOR(scpi_info->protocol_version)); | |
807 | } | |
808 | static DEVICE_ATTR_RO(protocol_version); | |
809 | ||
810 | static ssize_t firmware_version_show(struct device *dev, | |
811 | struct device_attribute *attr, char *buf) | |
812 | { | |
813 | struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev); | |
814 | ||
815 | return sprintf(buf, "%d.%d.%d\n", | |
816 | FW_REV_MAJOR(scpi_info->firmware_version), | |
817 | FW_REV_MINOR(scpi_info->firmware_version), | |
818 | FW_REV_PATCH(scpi_info->firmware_version)); | |
819 | } | |
820 | static DEVICE_ATTR_RO(firmware_version); | |
821 | ||
822 | static struct attribute *versions_attrs[] = { | |
823 | &dev_attr_firmware_version.attr, | |
824 | &dev_attr_protocol_version.attr, | |
825 | NULL, | |
826 | }; | |
827 | ATTRIBUTE_GROUPS(versions); | |
828 | ||
829 | static void | |
830 | scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count) | |
831 | { | |
832 | int i; | |
833 | ||
834 | for (i = 0; i < count && pchan->chan; i++, pchan++) { | |
835 | mbox_free_channel(pchan->chan); | |
836 | devm_kfree(dev, pchan->xfers); | |
837 | devm_iounmap(dev, pchan->rx_payload); | |
838 | } | |
839 | } | |
840 | ||
841 | static int scpi_remove(struct platform_device *pdev) | |
842 | { | |
843 | int i; | |
844 | struct device *dev = &pdev->dev; | |
845 | struct scpi_drvinfo *info = platform_get_drvdata(pdev); | |
846 | ||
847 | scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */ | |
848 | ||
849 | of_platform_depopulate(dev); | |
850 | sysfs_remove_groups(&dev->kobj, versions_groups); | |
851 | scpi_free_channels(dev, info->channels, info->num_chans); | |
852 | platform_set_drvdata(pdev, NULL); | |
853 | ||
854 | for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) { | |
855 | kfree(info->dvfs[i]->opps); | |
856 | kfree(info->dvfs[i]); | |
857 | } | |
858 | devm_kfree(dev, info->channels); | |
859 | devm_kfree(dev, info); | |
860 | ||
861 | return 0; | |
862 | } | |
863 | ||
864 | #define MAX_SCPI_XFERS 10 | |
865 | static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch) | |
866 | { | |
867 | int i; | |
868 | struct scpi_xfer *xfers; | |
869 | ||
870 | xfers = devm_kzalloc(dev, MAX_SCPI_XFERS * sizeof(*xfers), GFP_KERNEL); | |
871 | if (!xfers) | |
872 | return -ENOMEM; | |
873 | ||
874 | ch->xfers = xfers; | |
875 | for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++) | |
876 | list_add_tail(&xfers->node, &ch->xfers_list); | |
877 | return 0; | |
878 | } | |
879 | ||
880 | static const struct of_device_id legacy_scpi_of_match[] = { | |
881 | {.compatible = "arm,scpi-pre-1.0"}, | |
882 | {}, | |
883 | }; | |
884 | ||
885 | static int scpi_probe(struct platform_device *pdev) | |
886 | { | |
887 | int count, idx, ret; | |
888 | struct resource res; | |
889 | struct scpi_chan *scpi_chan; | |
890 | struct device *dev = &pdev->dev; | |
891 | struct device_node *np = dev->of_node; | |
892 | ||
893 | scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL); | |
894 | if (!scpi_info) | |
895 | return -ENOMEM; | |
896 | ||
897 | if (of_match_device(legacy_scpi_of_match, &pdev->dev)) | |
898 | scpi_info->is_legacy = true; | |
899 | ||
900 | count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); | |
901 | if (count < 0) { | |
902 | dev_err(dev, "no mboxes property in '%s'\n", np->full_name); | |
903 | return -ENODEV; | |
904 | } | |
905 | ||
906 | scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL); | |
907 | if (!scpi_chan) | |
908 | return -ENOMEM; | |
909 | ||
910 | for (idx = 0; idx < count; idx++) { | |
911 | resource_size_t size; | |
912 | struct scpi_chan *pchan = scpi_chan + idx; | |
913 | struct mbox_client *cl = &pchan->cl; | |
914 | struct device_node *shmem = of_parse_phandle(np, "shmem", idx); | |
915 | ||
916 | ret = of_address_to_resource(shmem, 0, &res); | |
917 | of_node_put(shmem); | |
918 | if (ret) { | |
919 | dev_err(dev, "failed to get SCPI payload mem resource\n"); | |
920 | goto err; | |
921 | } | |
922 | ||
923 | size = resource_size(&res); | |
924 | pchan->rx_payload = devm_ioremap(dev, res.start, size); | |
925 | if (!pchan->rx_payload) { | |
926 | dev_err(dev, "failed to ioremap SCPI payload\n"); | |
927 | ret = -EADDRNOTAVAIL; | |
928 | goto err; | |
929 | } | |
930 | pchan->tx_payload = pchan->rx_payload + (size >> 1); | |
931 | ||
932 | cl->dev = dev; | |
933 | cl->rx_callback = scpi_handle_remote_msg; | |
934 | cl->tx_prepare = scpi_tx_prepare; | |
935 | cl->tx_block = true; | |
936 | cl->tx_tout = 20; | |
937 | cl->knows_txdone = false; /* controller can't ack */ | |
938 | ||
939 | INIT_LIST_HEAD(&pchan->rx_pending); | |
940 | INIT_LIST_HEAD(&pchan->xfers_list); | |
941 | spin_lock_init(&pchan->rx_lock); | |
942 | mutex_init(&pchan->xfers_lock); | |
943 | ||
944 | ret = scpi_alloc_xfer_list(dev, pchan); | |
945 | if (!ret) { | |
946 | pchan->chan = mbox_request_channel(cl, idx); | |
947 | if (!IS_ERR(pchan->chan)) | |
948 | continue; | |
949 | ret = PTR_ERR(pchan->chan); | |
950 | if (ret != -EPROBE_DEFER) | |
951 | dev_err(dev, "failed to get channel%d err %d\n", | |
952 | idx, ret); | |
953 | } | |
954 | err: | |
955 | scpi_free_channels(dev, scpi_chan, idx); | |
956 | scpi_info = NULL; | |
957 | return ret; | |
958 | } | |
959 | ||
960 | scpi_info->channels = scpi_chan; | |
961 | scpi_info->num_chans = count; | |
962 | scpi_info->commands = scpi_std_commands; | |
963 | ||
964 | platform_set_drvdata(pdev, scpi_info); | |
965 | ||
966 | if (scpi_info->is_legacy) { | |
967 | /* Replace with legacy variants */ | |
968 | scpi_ops.clk_set_val = legacy_scpi_clk_set_val; | |
969 | scpi_info->commands = scpi_legacy_commands; | |
970 | ||
971 | /* Fill priority bitmap */ | |
972 | for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++) | |
973 | set_bit(legacy_hpriority_cmds[idx], | |
974 | scpi_info->cmd_priority); | |
975 | } | |
976 | ||
977 | ret = scpi_init_versions(scpi_info); | |
978 | if (ret) { | |
979 | dev_err(dev, "incorrect or no SCP firmware found\n"); | |
980 | scpi_remove(pdev); | |
981 | return ret; | |
982 | } | |
983 | ||
984 | _dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n", | |
985 | PROTOCOL_REV_MAJOR(scpi_info->protocol_version), | |
986 | PROTOCOL_REV_MINOR(scpi_info->protocol_version), | |
987 | FW_REV_MAJOR(scpi_info->firmware_version), | |
988 | FW_REV_MINOR(scpi_info->firmware_version), | |
989 | FW_REV_PATCH(scpi_info->firmware_version)); | |
990 | scpi_info->scpi_ops = &scpi_ops; | |
991 | ||
992 | ret = sysfs_create_groups(&dev->kobj, versions_groups); | |
993 | if (ret) | |
994 | dev_err(dev, "unable to create sysfs version group\n"); | |
995 | ||
996 | return of_platform_populate(dev->of_node, NULL, NULL, dev); | |
997 | } | |
998 | ||
999 | static const struct of_device_id scpi_of_match[] = { | |
1000 | {.compatible = "arm,scpi"}, | |
1001 | {.compatible = "arm,scpi-pre-1.0"}, | |
1002 | {}, | |
1003 | }; | |
1004 | ||
1005 | MODULE_DEVICE_TABLE(of, scpi_of_match); | |
1006 | ||
1007 | static struct platform_driver scpi_driver = { | |
1008 | .driver = { | |
1009 | .name = "scpi_protocol", | |
1010 | .of_match_table = scpi_of_match, | |
1011 | }, | |
1012 | .probe = scpi_probe, | |
1013 | .remove = scpi_remove, | |
1014 | }; | |
1015 | module_platform_driver(scpi_driver); | |
1016 | ||
1017 | MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); | |
1018 | MODULE_DESCRIPTION("ARM SCPI mailbox protocol driver"); | |
1019 | MODULE_LICENSE("GPL v2"); |