// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Wireless Connectivity Subsystem Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/rpmsg/qcom_smd.h>

#include "qcom_common.h"
#include "remoteproc_internal.h"
#include "qcom_pil_info.h"
#include "qcom_wcnss.h"

#define WCNSS_CRASH_REASON_SMEM		422
#define WCNSS_FIRMWARE_NAME		"wcnss.mdt"
#define WCNSS_PAS_ID			6
#define WCNSS_SSCTL_ID			0x13

#define WCNSS_SPARE_NVBIN_DLND		BIT(25)

#define WCNSS_PMU_IRIS_XO_CFG		BIT(3)
#define WCNSS_PMU_IRIS_XO_EN		BIT(4)
#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP	BIT(5)
#define WCNSS_PMU_IRIS_XO_CFG_STS	BIT(6) /* 1: in progress, 0: done */

#define WCNSS_PMU_IRIS_RESET		BIT(7)
#define WCNSS_PMU_IRIS_RESET_STS	BIT(8) /* 1: in progress, 0: done */
#define WCNSS_PMU_IRIS_XO_READ		BIT(9)
#define WCNSS_PMU_IRIS_XO_READ_STS	BIT(10)

#define WCNSS_PMU_XO_MODE_MASK		GENMASK(2, 1)
#define WCNSS_PMU_XO_MODE_19p2		0
#define WCNSS_PMU_XO_MODE_48		3

#define WCNSS_MAX_PDS			2

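/* Per-compatible match data: PMU register offsets, power domain names and regulators */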
struct wcnss_data {
	size_t pmu_offset;
	size_t spare_offset;

	const char *pd_names[WCNSS_MAX_PDS];
	const struct wcnss_vreg_info *vregs;
	size_t num_vregs, num_pd_vregs;
};

struct qcom_wcnss {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *pmu_cfg;
	void __iomem *spare_out;

	bool use_48mhz_xo;

	int wdog_irq;
	int fatal_irq;
	int ready_irq;
	int handover_irq;
	int stop_ack_irq;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct mutex iris_lock;
	struct qcom_iris *iris;

	struct device *pds[WCNSS_MAX_PDS];
	size_t num_pds;
	struct regulator_bulk_data *vregs;
	size_t num_vregs;

	struct completion start_done;
	struct completion stop_done;

	phys_addr_t mem_phys;
	phys_addr_t mem_reloc;
	void *mem_region;
	size_t mem_size;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_sysmon *sysmon;
};

static const struct wcnss_data riva_data = {
	.pmu_offset = 0x28,
	.spare_offset = 0xb4,

	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 1050000, 1150000, 0 },
		{ "vddcx", 1050000, 1150000, 0 },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_vregs = 3,
};

static const struct wcnss_data pronto_v1_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 950000, 1150000, 0 },
		{ "vddcx", .super_turbo = true },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

static const struct wcnss_data pronto_v2_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 1287500, 1287500, 0 },
		{ "vddcx", .super_turbo = true },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

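/*
 * Load the split firmware image into the reserved memory region via the MDT
 * loader and record its placement in the PIL info region for post-mortem tools.
 */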
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret;

	ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
			    wcnss->mem_region, wcnss->mem_phys,
			    wcnss->mem_size, &wcnss->mem_reloc);
	if (ret)
		return ret;

	qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);

	return 0;
}

static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Indicate NV download capability */
	val = readl(wcnss->spare_out);
	val |= WCNSS_SPARE_NVBIN_DLND;
	writel(val, wcnss->spare_out);
}

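/*
 * Program the PMU config register: enable the IRIS XO and select 19.2 MHz or
 * 48 MHz mode, pulse the IRIS reset and kick off XO configuration, polling the
 * status bits until the hardware reports completion.
 */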
static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Clear PMU cfg register */
	writel(0, wcnss->pmu_cfg);

	val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN;
	writel(val, wcnss->pmu_cfg);

	/* Clear XO_MODE */
	val &= ~WCNSS_PMU_XO_MODE_MASK;
	if (wcnss->use_48mhz_xo)
		val |= WCNSS_PMU_XO_MODE_48 << 1;
	else
		val |= WCNSS_PMU_XO_MODE_19p2 << 1;
	writel(val, wcnss->pmu_cfg);

	/* Reset IRIS */
	val |= WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Wait for PMU.iris_reg_reset_sts */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS)
		cpu_relax();

	/* Clear IRIS reset */
	val &= ~WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Start IRIS XO configuration */
	val |= WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Wait for XO configuration to finish */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS)
		cpu_relax();

	/* Stop IRIS XO configuration */
	val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP;
	val &= ~WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Add some delay for XO to settle */
	msleep(20);
}

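/*
 * Power up the WCNSS: enable the power domains, regulators and the IRIS RF
 * module, configure the XO, then authenticate the firmware and release the
 * core from reset via TrustZone. Once the firmware reports ready (or the 5 s
 * wait elapses) the locally held resources are dropped again; see the comment
 * in wcnss_handover_interrupt().
 */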
static int wcnss_start(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret, i;

	mutex_lock(&wcnss->iris_lock);
	if (!wcnss->iris) {
		dev_err(wcnss->dev, "no iris registered\n");
		ret = -EINVAL;
		goto release_iris_lock;
	}

	for (i = 0; i < wcnss->num_pds; i++) {
		dev_pm_genpd_set_performance_state(wcnss->pds[i], INT_MAX);
		ret = pm_runtime_get_sync(wcnss->pds[i]);
		if (ret < 0) {
			pm_runtime_put_noidle(wcnss->pds[i]);
			goto disable_pds;
		}
	}

	ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
	if (ret)
		goto disable_pds;

	ret = qcom_iris_enable(wcnss->iris);
	if (ret)
		goto disable_regulators;

	wcnss_indicate_nv_download(wcnss);
	wcnss_configure_iris(wcnss);

	ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID);
	if (ret) {
		dev_err(wcnss->dev,
			"failed to authenticate image and release reset\n");
		goto disable_iris;
	}

	ret = wait_for_completion_timeout(&wcnss->start_done,
					  msecs_to_jiffies(5000));
	if (wcnss->ready_irq > 0 && ret == 0) {
		/* We have a ready_irq, but it didn't fire in time. */
		dev_err(wcnss->dev, "start timed out\n");
		qcom_scm_pas_shutdown(WCNSS_PAS_ID);
		ret = -ETIMEDOUT;
		goto disable_iris;
	}

	ret = 0;

disable_iris:
	qcom_iris_disable(wcnss->iris);
disable_regulators:
	regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
disable_pds:
	for (i--; i >= 0; i--) {
		pm_runtime_put(wcnss->pds[i]);
		dev_pm_genpd_set_performance_state(wcnss->pds[i], 0);
	}
release_iris_lock:
	mutex_unlock(&wcnss->iris_lock);

	return ret;
}

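/*
 * Ask the firmware to stop by raising the "stop" SMEM state bit and waiting
 * for the stop-ack interrupt, then shut the core down through TrustZone.
 */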
static int wcnss_stop(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret;

	if (wcnss->state) {
		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    BIT(wcnss->stop_bit));

		ret = wait_for_completion_timeout(&wcnss->stop_done,
						  msecs_to_jiffies(5000));
		if (ret == 0)
			dev_err(wcnss->dev, "timed out on wait\n");

		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    0);
	}

	ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
	if (ret)
		dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);

	return ret;
}

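/*
 * Translate a device address used by the firmware into a kernel virtual
 * address within the carveout; returns NULL if the range falls outside it.
 */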
static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int offset;

	offset = da - wcnss->mem_reloc;
	if (offset < 0 || offset + len > wcnss->mem_size)
		return NULL;

	return wcnss->mem_region + offset;
}

static const struct rproc_ops wcnss_ops = {
	.start = wcnss_start,
	.stop = wcnss_stop,
	.da_to_va = wcnss_da_to_va,
	.parse_fw = qcom_register_dump_segments,
	.load = wcnss_load,
};

static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(wcnss->dev, "fatal error received: %s\n", msg);

	rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->start_done);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
{
	/*
	 * XXX: At this point we're supposed to release the resources that we
	 * have been holding on behalf of the WCNSS. Unfortunately this
	 * interrupt comes way before the other side seems to be done.
	 *
	 * So we're currently relying on the ready interrupt firing later than
	 * this one, and we just disable the resources at the end of
	 * wcnss_start().
	 */

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->stop_done);

	return IRQ_HANDLED;
}

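/*
 * Attach the power domains named in the match data. On failure the domains
 * attached so far are detached again; on success wcnss->num_pds holds the
 * number of attached domains.
 */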
static int wcnss_init_pds(struct qcom_wcnss *wcnss,
			  const char * const pd_names[WCNSS_MAX_PDS])
{
	int i, ret;

	for (i = 0; i < WCNSS_MAX_PDS; i++) {
		if (!pd_names[i])
			break;

		wcnss->pds[i] = dev_pm_domain_attach_by_name(wcnss->dev, pd_names[i]);
		if (IS_ERR_OR_NULL(wcnss->pds[i])) {
			ret = PTR_ERR(wcnss->pds[i]) ? : -ENODATA;
			for (i--; i >= 0; i--)
				dev_pm_domain_detach(wcnss->pds[i], false);
			return ret;
		}
	}
	wcnss->num_pds = i;

	return 0;
}

static void wcnss_release_pds(struct qcom_wcnss *wcnss)
{
	int i;

	for (i = 0; i < wcnss->num_pds; i++)
		dev_pm_domain_detach(wcnss->pds[i], false);
}

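/*
 * Request the regulators described in the match data and apply their voltage
 * and load settings. Supplies already covered by attached power domains are
 * skipped.
 */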
static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
				 const struct wcnss_vreg_info *info,
				 int num_vregs, int num_pd_vregs)
{
	struct regulator_bulk_data *bulk;
	int ret;
	int i;

	/*
	 * If attaching the power domains succeeded we can skip requesting
	 * the regulators for the power domains. For old device trees we need
	 * to reserve extra space to manage them through the regulator
	 * interface.
	 */
	if (wcnss->num_pds)
		info += num_pd_vregs;
	else
		num_vregs += num_pd_vregs;

	bulk = devm_kcalloc(wcnss->dev,
			    num_vregs, sizeof(struct regulator_bulk_data),
			    GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	for (i = 0; i < num_vregs; i++)
		bulk[i].supply = info[i].name;

	ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
	if (ret)
		return ret;

	for (i = 0; i < num_vregs; i++) {
		if (info[i].max_voltage)
			regulator_set_voltage(bulk[i].consumer,
					      info[i].min_voltage,
					      info[i].max_voltage);

		if (info[i].load_uA)
			regulator_set_load(bulk[i].consumer, info[i].load_uA);
	}

	wcnss->vregs = bulk;
	wcnss->num_vregs = num_vregs;

	return 0;
}

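/*
 * Look up a named interrupt and install a threaded handler for it. An absent
 * optional interrupt yields 0; otherwise the IRQ number or a negative error
 * is returned so the caller can record it.
 */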
static int wcnss_request_irq(struct qcom_wcnss *wcnss,
			     struct platform_device *pdev,
			     const char *name,
			     bool optional,
			     irq_handler_t thread_fn)
{
	int irq;
	int ret;

	irq = platform_get_irq_byname(pdev, name);
	if (irq < 0 && optional) {
		dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name);
		return 0;
	} else if (irq < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"wcnss", wcnss);
	if (ret) {
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);
		return ret;
	}

	/* Return the IRQ number so the caller can store and test it */
	return irq;
}

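/*
 * Resolve the "memory-region" phandle and map the carveout that the firmware
 * will be loaded into.
 */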
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
	if (!node) {
		dev_err(wcnss->dev, "no memory-region specified\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(node, 0, &r);
	if (ret)
		return ret;

	wcnss->mem_phys = wcnss->mem_reloc = r.start;
	wcnss->mem_size = resource_size(&r);
	wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
	if (!wcnss->mem_region) {
		dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, wcnss->mem_size);
		return -EBUSY;
	}

	return 0;
}

static int wcnss_probe(struct platform_device *pdev)
{
	const char *fw_name = WCNSS_FIRMWARE_NAME;
	const struct wcnss_data *data;
	struct qcom_wcnss *wcnss;
	struct resource *res;
	struct rproc *rproc;
	void __iomem *mmio;
	int ret;

	data = of_device_get_match_data(&pdev->dev);

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) {
		dev_err(&pdev->dev, "PAS is not available for WCNSS\n");
		return -ENXIO;
	}

	ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
				      &fw_name);
	if (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
			    fw_name, sizeof(*wcnss));
	if (!rproc) {
		dev_err(&pdev->dev, "unable to allocate remoteproc\n");
		return -ENOMEM;
	}
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	wcnss = (struct qcom_wcnss *)rproc->priv;
	wcnss->dev = &pdev->dev;
	wcnss->rproc = rproc;
	platform_set_drvdata(pdev, wcnss);

	init_completion(&wcnss->start_done);
	init_completion(&wcnss->stop_done);

	mutex_init(&wcnss->iris_lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu");
	mmio = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmio)) {
		ret = PTR_ERR(mmio);
		goto free_rproc;
	}

	ret = wcnss_alloc_memory_region(wcnss);
	if (ret)
		goto free_rproc;

	wcnss->pmu_cfg = mmio + data->pmu_offset;
	wcnss->spare_out = mmio + data->spare_offset;

	/*
	 * We might need to fallback to regulators instead of power domains
	 * for old device trees. Don't report an error in that case.
	 */
	ret = wcnss_init_pds(wcnss, data->pd_names);
	if (ret && (ret != -ENODATA || !data->num_pd_vregs))
		goto free_rproc;

	ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
				    data->num_pd_vregs);
	if (ret)
		goto detach_pds;

	ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->wdog_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->fatal_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->ready_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->handover_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->stop_ack_irq = ret;

	if (wcnss->stop_ack_irq) {
		wcnss->state = devm_qcom_smem_state_get(&pdev->dev, "stop",
							&wcnss->stop_bit);
		if (IS_ERR(wcnss->state)) {
			ret = PTR_ERR(wcnss->state);
			goto detach_pds;
		}
	}

	qcom_add_smd_subdev(rproc, &wcnss->smd_subdev);
	wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
	if (IS_ERR(wcnss->sysmon)) {
		ret = PTR_ERR(wcnss->sysmon);
		goto detach_pds;
	}

	wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
	if (IS_ERR(wcnss->iris)) {
		ret = PTR_ERR(wcnss->iris);
		goto detach_pds;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_iris;

	return 0;

remove_iris:
	qcom_iris_remove(wcnss->iris);
detach_pds:
	wcnss_release_pds(wcnss);
free_rproc:
	rproc_free(rproc);

	return ret;
}

static int wcnss_remove(struct platform_device *pdev)
{
	struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);

	qcom_iris_remove(wcnss->iris);

	rproc_del(wcnss->rproc);

	qcom_remove_sysmon_subdev(wcnss->sysmon);
	qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
	wcnss_release_pds(wcnss);
	rproc_free(wcnss->rproc);

	return 0;
}

static const struct of_device_id wcnss_of_match[] = {
	{ .compatible = "qcom,riva-pil", &riva_data },
	{ .compatible = "qcom,pronto-v1-pil", &pronto_v1_data },
	{ .compatible = "qcom,pronto-v2-pil", &pronto_v2_data },
	{ },
};
MODULE_DEVICE_TABLE(of, wcnss_of_match);

static struct platform_driver wcnss_driver = {
	.probe = wcnss_probe,
	.remove = wcnss_remove,
	.driver = {
		.name = "qcom-wcnss-pil",
		.of_match_table = wcnss_of_match,
	},
};

module_platform_driver(wcnss_driver);

MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");